/*
 * Copyright (c) 2017-2022 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#pragma once

#include <stdio.h>

#include "arm_gemm.hpp"
#include "bias_adder.hpp"
#include "mergeresults.hpp"
#include "transform.hpp"

#ifdef CYCLE_PROFILING
#include "profiler.hpp"
#endif

namespace arm_gemm {

namespace {

template<typename OutputStage>
class run_gemv_kernel {
public:
    template<typename strategy, typename Tlo, typename Tro, typename Tr>
    static void run(
        const strategy &strat,
        const Tlo *A_ptr, const Tro *B_ptr, Tr *C_ptr,
        size_t N, size_t K,
        const Tr *bias, const Activation &act, bool Accumulate,
        const OutputStage &os, const int32_t *col_bias, unsigned int col_base
    );
};

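// The two specializations below dispatch on the output stage type: the
// Nothing case forwards the bias and activation straight to the kernel, while
// the Requantize32 case instead hands the kernel the quantization parameters
// and the precomputed column sums.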
template<>
template<typename strategy, typename Tlo, typename Tro, typename Tr>
void run_gemv_kernel<Nothing>::run(
        const strategy &strat,
        const Tlo *A_ptr, const Tro *B_ptr, Tr *C_ptr,
        size_t N, size_t K,
        const Tr *bias, const Activation &act, bool Accumulate,
        const Nothing &, const int32_t *, unsigned int
        ) {

    strat.kernel(A_ptr, B_ptr, C_ptr, N, K, bias, act, Accumulate);
}

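// Quantized case: the bias, activation and accumulate arguments are unused
// here because the Requantize32 parameters are applied inside the kernel,
// together with the column sums starting at offset col_base.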
template<>
template<typename strategy, typename Tlo, typename Tro, typename Tr>
void run_gemv_kernel<Requantize32>::run(
        const strategy &strat,
        const Tlo *A_ptr, const Tro *B_ptr, Tr *C_ptr,
        size_t N, size_t K,
        const Tr *, const Activation &, bool,
        const Requantize32 &qp, const int32_t *col_bias, unsigned int col_base
        ) {

    strat.kernel(A_ptr, B_ptr, C_ptr, N, K, &qp, col_bias + col_base, col_base);
}

} // anonymous namespace

// Implementation of the GemmCommon abstract class.
//
// This implementation is for GEMV with pretransposition.
//
// Batches are not supported, as a batched GEMV makes no sense (it can be converted to a GEMM).
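//
// A hypothetical driving sequence (a sketch only; the A/C/bias array pointers
// are set through the usual GemmCommon setters, which are not shown here):
//
//   GemvPretransposed<strategy, To, Tr> gemv(args);
//   std::vector<uint8_t> buf(gemv.get_B_pretransposed_array_size());
//   gemv.pretranspose_B_array(buf.data(), B, ldb, B_multi_stride);
//   gemv.execute(work_range, {}, 0);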
template<typename strategy, typename To, typename Tr, typename OutputStage=Nothing>
class GemvPretransposed : public GemmCommon<To, Tr> {
    typedef typename strategy::operand_type Toi;
    typedef typename strategy::result_type Tri;

    const GemmArgs _args;

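    // Size in elements (not bytes) of one multi's pretransposed B panel;
    // K and N are padded up to the strategy's k_unroll and out_width.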
    const unsigned int _buffer_per_multi;

    unsigned int k_block = 0;
    unsigned int n_block = 0;

    const Toi *_B_pretransposed = nullptr;

    OutputStage _os;

    // Pointer to the column sums (for quantized cases)
    int32_t *col_bias = nullptr;

    // Get the size (in bytes) of the column sums
    unsigned int get_col_sum_size() const {
        if (std::is_same<OutputStage, Requantize32>::value) {
            return _args._Nsize * _args._nmulti * sizeof(int32_t);
        } else {
            return 0;
        }
    }

public:
    GemvPretransposed(GemvPretransposed &) = delete;
    GemvPretransposed & operator= (GemvPretransposed &) = delete;

    GemvPretransposed(const GemmArgs &args, const OutputStage &os = {})
        : _args(args),
          _buffer_per_multi(roundup(args._Ksize, strategy::k_unroll()) * roundup(args._Nsize, strategy::out_width())),
          _os(os) {
        /* For now don't do any blocking. TODO: figure out if we should. */
        if (strategy::supports_accumulate() && args._cfg && args._cfg->inner_block_size) {
            k_block = args._cfg->inner_block_size;
        } else {
            k_block = args._Ksize;
        }

        if (args._cfg && args._cfg->outer_block_size) {
            n_block = args._cfg->outer_block_size;
        } else {
            n_block = args._Nsize;
        }
    }

    // Window is number of out_width blocks, times number of multis.
    ndrange_t get_window_size() const override {
        return { iceildiv(_args._Nsize, strategy::out_width()) * _args._nmulti };
    }

    // Actually execute the GEMV.
    void execute(const ndcoord_t &work_range, const ndcoord_t &, int) override {
#ifdef CYCLE_PROFILING
        profiler prof;
#endif
        strategy strat(_args._ci);

        const auto start = work_range.get_position(0);
        const auto end = work_range.get_position_end(0);

        /* Break the window values down into multis of interest... */
        const unsigned int window_per_multi = iceildiv(_args._Nsize, strategy::out_width());
        const unsigned int multi_0 = start / window_per_multi;
        const unsigned int multi_end = end / window_per_multi;

        /* ... and figure out where we start and end in the first and last multi. */
        const unsigned int n_0 = (start - (multi_0 * window_per_multi)) * strategy::out_width();
        const unsigned int n_max = (end - (multi_end * window_per_multi)) * strategy::out_width();
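        // Worked example with hypothetical numbers: Nsize=100, out_width=8 and
        // nmulti=2 give window_per_multi=13 and a total window of 26.  A range
        // starting at position 15 begins in multi 1 at column
        // n_0 = (15 - 13) * 8 = 16.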

        static_assert(std::is_same<Tr, Tri>::value, "GemvPretransposed: Result types must be the same.");

        for (unsigned int multi=multi_0; multi<=multi_end; multi++) {
            const unsigned int n_start = (multi==multi_0) ? n_0 : 0;
            const unsigned int n_end = (multi==multi_end) ? n_max : _args._Nsize;

            if (n_end <= n_start)
                continue;

            for (unsigned int k0=0; k0<_args._Ksize; k0+=k_block) {
                unsigned int kmax = std::min(k0 + k_block, _args._Ksize);

                for (unsigned int n=n_start; n<n_end; n+=n_block) {
                    unsigned int nmax = std::min(n + n_block, n_end);
#ifdef CYCLE_PROFILING
                    auto p = prof.ScopedProfiler(PROFILE_KERNEL, (kmax-k0) * (nmax-n));
#endif
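                    // Locate the B panel: each multi owns _buffer_per_multi
                    // elements; within that, each group of out_width columns
                    // stores its (padded) K rows contiguously, so advancing n
                    // columns skips n * roundup(Ksize, k_unroll) elements and
                    // reaching row k0 within a group adds k0 * out_width.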
                    run_gemv_kernel<OutputStage>::run(strat, this->_Aptr + (multi * this->_A_multi_stride) + k0,
                                 _B_pretransposed + (multi * _buffer_per_multi) + (n * roundup(_args._Ksize, strategy::k_unroll())) + (k0 * strategy::out_width()),
                                 this->_Cptr + (multi * this->_C_multi_stride) + n,
                                 (nmax - n), (kmax-k0),
                                 this->_bias ? this->_bias + (multi * this->_bias_multi_stride) + n : nullptr,
                                 _args._act, (k0 != 0),
                                 _os, col_bias, n + (_args._Nsize * multi));
                }
            }
        }
    }

    /* Pretransposed interface implementation */
    bool B_is_pretransposed() const override {
        return true;
    }

    bool B_pretranspose_required() const override {
        /* Transpose is required if _B_pretransposed is still nullptr */
        return (_B_pretransposed == nullptr);
    }

    size_t get_B_pretransposed_array_size() const override {
        return _buffer_per_multi * _args._nmulti * sizeof(To) + get_col_sum_size();
    }

    void requantize_bias(void *in_buffer, const To *B, const int ldb, const int B_multi_stride) override {
        // Column sums go on the front of the pretransposed buffer in requantized cases.
        // We could optimize here in case we don't actually need to sum the columns, but this code is only run on setup.
        if (std::is_same<OutputStage, Requantize32>::value) {
            col_bias = reinterpret_cast<int32_t *>(in_buffer);

            Requantize32 *qp_ptr = reinterpret_cast<Requantize32 *>(&_os);

            for (unsigned int i=0; i<_args._nmulti; i++) {
                compute_col_sums(*qp_ptr, _args._Nsize, _args._Ksize, B + (i * B_multi_stride), ldb, col_bias + (i * _args._Nsize), _args._Ksize, i, 0);
            }
        }
    }

    void pretranspose_B_array(void *buffer, const To *B, const int ldb, const int B_multi_stride) override {
        requantize_bias(buffer, B, ldb, B_multi_stride);

        // The actual transposed buffer goes after the column sums (if any)
        uintptr_t buffer_int = reinterpret_cast<uintptr_t>(buffer);
        Toi *B_buffer = reinterpret_cast<Toi *>(buffer_int + get_col_sum_size());

        strategy strat(_args._ci);

        for (unsigned int multi=0; multi<_args._nmulti; multi++) {
            strat.transforms.PrepareB(B_buffer + (multi * _buffer_per_multi), B + (multi * B_multi_stride), ldb, 0, _args._Nsize, 0, _args._Ksize);
        }

        _B_pretransposed = B_buffer;
    }

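    // Adopt a buffer that already holds pretransposed data (e.g. one filled
    // by a previous pretranspose_B_array call), skipping the transform.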
    void set_pretransposed_B_data(void *buffer) override {
        _B_pretransposed = reinterpret_cast<Toi *>(buffer);
    }

    GemmConfig get_config() override {
        GemmConfig c;

        c.method = GemmMethod::GEMV_PRETRANSPOSED;
        c.inner_block_size = k_block;
        c.outer_block_size = n_block;
        c.filter = get_type_name<strategy>();

        return c;
    }
};

} // namespace arm_gemm