/*
 * Copyright (c) 2019-2021, 2023 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "src/cpu/kernels/CpuGemmLowpOffsetContributionOutputStageKernel.h"

#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/Window.h"
#include "src/core/NEON/NEAsymm.h"
#include "src/core/NEON/wrapper/wrapper.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"

#include <arm_neon.h>

namespace arm_compute
{
namespace cpu
{
namespace kernels
{
namespace
{
inline int32x4x4_t load_results_input(const Iterator &mm_result_it, int32_t x)
{
    return
    {
        {
            vld1q_s32(reinterpret_cast<const int32_t *>(mm_result_it.ptr()) + x + 0),
            vld1q_s32(reinterpret_cast<const int32_t *>(mm_result_it.ptr()) + x + 4),
            vld1q_s32(reinterpret_cast<const int32_t *>(mm_result_it.ptr()) + x + 8),
            vld1q_s32(reinterpret_cast<const int32_t *>(mm_result_it.ptr()) + x + 12)
        }
    };
}

inline int32x4x4_t load(const int32_t *ptr, int32_t x)
{
    return
    {
        {
            vld1q_s32(ptr + x + 0),
            vld1q_s32(ptr + x + 4),
            vld1q_s32(ptr + x + 8),
            vld1q_s32(ptr + x + 12)
        }
    };
}

inline int32x4x4_t add_s32(int32x4x4_t a, int32x4_t b)
{
    return
    {
        {
            vaddq_s32(a.val[0], b),
            vaddq_s32(a.val[1], b),
            vaddq_s32(a.val[2], b),
            vaddq_s32(a.val[3], b)
        }
    };
}

inline int32x4x4_t add_s32(int32x4x4_t a, int32x4x4_t b)
{
    return
    {
        {
            vaddq_s32(a.val[0], b.val[0]),
            vaddq_s32(a.val[1], b.val[1]),
            vaddq_s32(a.val[2], b.val[2]),
            vaddq_s32(a.val[3], b.val[3])
        }
    };
}

inline int32x4x4_t mul_s32(int32x4x4_t &a, int32_t mul_scalar)
{
    return
    {
        {
            vmulq_n_s32(a.val[0], mul_scalar),
            vmulq_n_s32(a.val[1], mul_scalar),
            vmulq_n_s32(a.val[2], mul_scalar),
            vmulq_n_s32(a.val[3], mul_scalar)
        }
    };
}

inline int32x4x4_t mul_s32(int32x4x4_t &a, const int32_t *multiplier)
{
    return
    {
        {
            vmulq_s32(a.val[0], vld1q_s32(multiplier)),
            vmulq_s32(a.val[1], vld1q_s32(multiplier + 4)),
            vmulq_s32(a.val[2], vld1q_s32(multiplier + 8)),
            vmulq_s32(a.val[3], vld1q_s32(multiplier + 12))
        }
    };
}

inline int32x4x4_t get_a_offset(const int32_t *vector_sum_col_ptr, int32_t a_offset, int32_t x)
{
    int32x4x4_t a_offset_term_s32 = load(vector_sum_col_ptr, x);

    a_offset_term_s32.val[0] = vmulq_n_s32(a_offset_term_s32.val[0], a_offset);
    a_offset_term_s32.val[1] = vmulq_n_s32(a_offset_term_s32.val[1], a_offset);
    a_offset_term_s32.val[2] = vmulq_n_s32(a_offset_term_s32.val[2], a_offset);
    a_offset_term_s32.val[3] = vmulq_n_s32(a_offset_term_s32.val[3], a_offset);
    return a_offset_term_s32;
}

inline int32x4_t get_b_offset(const int32_t *vector_sum_row_ptr, int32_t b_offset)
{
    int32x4_t b_offset_term_s32 = vld1q_dup_s32(vector_sum_row_ptr);
    b_offset_term_s32           = vmulq_n_s32(b_offset_term_s32, b_offset);
    return b_offset_term_s32;
}

inline int32x4x4_t get_k_offset(int32_t k_offset)
{
    return
    {
        {
            vdupq_n_s32(k_offset),
            vdupq_n_s32(k_offset),
            vdupq_n_s32(k_offset),
            vdupq_n_s32(k_offset)
        }
    };
}

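// The finalize_quantization_floating_point() overloads below implement the final step of
// the QUANTIZE_DOWN (non fixed-point) output stage: the pre-multiplied S32 accumulators
// are shifted with vshlq_s32 (a negative count gives an arithmetic right shift), clamped
// at zero, saturated down to S16 and then to the 8-bit output type, with an optional
// bounded-ReLU clamp against the provided min/max vectors.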
inline uint8x16_t finalize_quantization_floating_point(int32x4x4_t &in_s32, int32x4_t result_shift_s32, uint8x16_t min_u8, uint8x16_t max_u8, bool is_bounded_relu)
{
    const static int32x4_t zero_s32 = vdupq_n_s32(0);

    // Shift final result (negative value shift right)
    in_s32.val[0] = vshlq_s32(in_s32.val[0], result_shift_s32);
    in_s32.val[1] = vshlq_s32(in_s32.val[1], result_shift_s32);
    in_s32.val[2] = vshlq_s32(in_s32.val[2], result_shift_s32);
    in_s32.val[3] = vshlq_s32(in_s32.val[3], result_shift_s32);

    // Saturate negative values
    in_s32.val[0] = vmaxq_s32(in_s32.val[0], zero_s32);
    in_s32.val[1] = vmaxq_s32(in_s32.val[1], zero_s32);
    in_s32.val[2] = vmaxq_s32(in_s32.val[2], zero_s32);
    in_s32.val[3] = vmaxq_s32(in_s32.val[3], zero_s32);

    // Convert S32 to S16
    const int16x8x2_t in_s16 =
    {
        {
            vcombine_s16(vqmovn_s32(in_s32.val[0]), vqmovn_s32(in_s32.val[1])),
            vcombine_s16(vqmovn_s32(in_s32.val[2]), vqmovn_s32(in_s32.val[3]))
        }
    };

    // Convert S16 to U8
    uint8x16_t out_u8 = vcombine_u8(vqmovun_s16(in_s16.val[0]), vqmovun_s16(in_s16.val[1]));

    if(is_bounded_relu)
    {
        out_u8 = vmaxq_u8(out_u8, min_u8);
        out_u8 = vminq_u8(out_u8, max_u8);
    }

    return out_u8;
}

inline int8x16_t finalize_quantization_floating_point(int32x4x4_t &in_s32, int32x4_t result_shift_s32, int8x16_t min_s8, int8x16_t max_s8, bool is_bounded_relu)
{
    const static int32x4_t zero_s32 = vdupq_n_s32(0);

    // Shift final result (negative value shift right)
    in_s32.val[0] = vshlq_s32(in_s32.val[0], result_shift_s32);
    in_s32.val[1] = vshlq_s32(in_s32.val[1], result_shift_s32);
    in_s32.val[2] = vshlq_s32(in_s32.val[2], result_shift_s32);
    in_s32.val[3] = vshlq_s32(in_s32.val[3], result_shift_s32);

    // Saturate negative values
    in_s32.val[0] = vmaxq_s32(in_s32.val[0], zero_s32);
    in_s32.val[1] = vmaxq_s32(in_s32.val[1], zero_s32);
    in_s32.val[2] = vmaxq_s32(in_s32.val[2], zero_s32);
    in_s32.val[3] = vmaxq_s32(in_s32.val[3], zero_s32);

    // Convert S32 to S16
    const int16x8x2_t in_s16 =
    {
        {
            vcombine_s16(vqmovn_s32(in_s32.val[0]), vqmovn_s32(in_s32.val[1])),
            vcombine_s16(vqmovn_s32(in_s32.val[2]), vqmovn_s32(in_s32.val[3]))
        }
    };

    // Convert S16 to S8
    int8x16_t out_s8 = vcombine_s8(vqmovn_s16(in_s16.val[0]), vqmovn_s16(in_s16.val[1]));

    if(is_bounded_relu)
    {
        out_s8 = vmaxq_s8(out_s8, min_s8);
        out_s8 = vminq_s8(out_s8, max_s8);
    }

    return out_s8;
}

inline int8x16_t finalize_quantization_floating_point(int32x4x4_t &in_s32, int32x4x4_t result_shift_s32, int8x16_t min_s8, int8x16_t max_s8, bool is_bounded_relu)
{
    const static int32x4_t zero_s32 = vdupq_n_s32(0);

    // Shift final result (negative value shift right)
    in_s32.val[0] = vshlq_s32(in_s32.val[0], vnegq_s32(result_shift_s32.val[0]));
    in_s32.val[1] = vshlq_s32(in_s32.val[1], vnegq_s32(result_shift_s32.val[1]));
    in_s32.val[2] = vshlq_s32(in_s32.val[2], vnegq_s32(result_shift_s32.val[2]));
    in_s32.val[3] = vshlq_s32(in_s32.val[3], vnegq_s32(result_shift_s32.val[3]));

    // Saturate negative values
    in_s32.val[0] = vmaxq_s32(in_s32.val[0], zero_s32);
    in_s32.val[1] = vmaxq_s32(in_s32.val[1], zero_s32);
    in_s32.val[2] = vmaxq_s32(in_s32.val[2], zero_s32);
    in_s32.val[3] = vmaxq_s32(in_s32.val[3], zero_s32);

    // Convert S32 to S16
    const int16x8x2_t in_s16 =
    {
        {
            vcombine_s16(vqmovn_s32(in_s32.val[0]), vqmovn_s32(in_s32.val[1])),
            vcombine_s16(vqmovn_s32(in_s32.val[2]), vqmovn_s32(in_s32.val[3]))
        }
    };

    // Convert S16 to S8
    int8x16_t out_s8 = vcombine_s8(vqmovn_s16(in_s16.val[0]), vqmovn_s16(in_s16.val[1]));

    if(is_bounded_relu)
    {
        out_s8 = vmaxq_s8(out_s8, min_s8);
        out_s8 = vminq_s8(out_s8, max_s8);
    }

    return out_s8;
}

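// Maps the 8-bit scalar output type (uint8_t for QASYMM8, int8_t for QASYMM8_SIGNED) to
// its 128-bit NEON vector counterpart, so that the asymmetric path below can be shared
// between both output data types.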
template <typename T>
struct VectorTyper
{
    using stype = T;
    using vtype = typename wrapper::traits::neon_bitvector_t<T, wrapper::traits::BitWidth::W128>;
};

inline Window get_win_vector_sum(const Window &window)
{
    Window win_vector_sum(window);
    win_vector_sum.set(Window::DimY, Window::Dimension(0, 0, 0));
    win_vector_sum.set(Window::DimZ, Window::Dimension(0, 0, 0));
    return win_vector_sum;
}

inline Iterator get_vector_sum_col_it(const Window &window, const ITensor *vector_sum_col)
{
    Iterator vector_sum_col_it(vector_sum_col, get_win_vector_sum(window));
    return vector_sum_col_it;
}

inline Iterator get_vector_sum_row_it(const Window &window, const ITensor *vector_sum_row)
{
    Window win_vector_sum_row = get_win_vector_sum(window);
    win_vector_sum_row.set(Window::DimX, Window::Dimension(0, 0, 0));
    Iterator vector_sum_row_it(vector_sum_row, win_vector_sum_row);
    return vector_sum_row_it;
}

inline Iterator get_bias_it(const Window &window, const ITensor *bias)
{
    Window win_bias(window);
    win_bias.set(Window::DimY, Window::Dimension(0, 1, 1));
    win_bias.set(Window::DimZ, Window::Dimension(0, 1, 1));
    Iterator bias_it(bias, win_bias);
    return bias_it;
}

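// Processes one row of the execution window: accumulates the a_offset (column sums),
// b_offset (row sum) and k_offset contributions plus the optional bias into the S32 GEMM
// result, then requantizes to the 8-bit output. The main loop handles 16 elements per
// iteration and a scalar loop handles the leftover; the has_*/is_* flags are fixed by the
// caller for each specialisation.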
template <typename VT>
inline void run_offset_contribution_output_stage_window(const int32_t *vector_sum_col_ptr, const int32_t *vector_sum_row_ptr, const int32_t *bias_ptr, Iterator mm_result_it, Iterator out_it,
                                                        const int32x4_t result_offset_s32, const int32x4_t result_shift_s32,
                                                        typename VT::vtype min_vec, typename VT::vtype max_vec,
                                                        int32_t a_offset, int32_t b_offset, int32_t k_offset,
                                                        int32_t multiplier, int32_t shift, int32_t offset, int32_t min_bound, int32_t max_bound,
                                                        int window_step_x, int window_start_x, int window_end_x, bool has_a_offset, bool has_b_offset, bool has_bias, bool is_bounded_relu, bool is_fixed_point)
{
    int32x4x4_t offset_term_s32 = { 0, 0, 0, 0 };
    if(!is_fixed_point)
    {
        // Combine quantization offset with other offsets.
        offset_term_s32 = add_s32(offset_term_s32, result_offset_s32);
    }
    if(has_a_offset && has_b_offset)
    {
        offset_term_s32 = add_s32(offset_term_s32, get_k_offset(k_offset));
    }
    if(has_b_offset)
    {
        offset_term_s32 = add_s32(offset_term_s32, get_b_offset(vector_sum_row_ptr, b_offset));
    }

    int x = window_start_x;
    for(; x <= (window_end_x - window_step_x); x += window_step_x)
    {
        int32x4x4_t in_s32 = load_results_input(mm_result_it, x);

        if(has_a_offset)
        {
            in_s32 = add_s32(in_s32, get_a_offset(vector_sum_col_ptr, a_offset, x));
        }
        if(has_bias)
        {
            in_s32 = add_s32(in_s32, load(bias_ptr, x));
        }
        if(!is_fixed_point || has_b_offset)
        {
            in_s32 = add_s32(in_s32, offset_term_s32);
        }
        if(!is_fixed_point)
        {
            in_s32 = mul_s32(in_s32, multiplier);
        }

        if(is_fixed_point)
        {
            wrapper::vstore(reinterpret_cast<typename VT::stype *>(out_it.ptr() + x),
                            finalize_quantization(in_s32, multiplier, shift, result_offset_s32, min_vec, max_vec, is_bounded_relu));
        }
        else
        {
            wrapper::vstore(reinterpret_cast<typename VT::stype *>(out_it.ptr() + x),
                            finalize_quantization_floating_point(in_s32, result_shift_s32, min_vec, max_vec, is_bounded_relu));
        }
    }
    // Compute left-over elements
    for(; x < window_end_x; ++x)
    {
        int32_t in_value = *(reinterpret_cast<const int32_t *>(mm_result_it.ptr()) + x) + wrapper::vgetlane(offset_term_s32.val[0], 0);

        if(has_a_offset)
        {
            in_value += (*(vector_sum_col_ptr + x) * a_offset);
        }
        if(has_bias)
        {
            in_value += *(bias_ptr + x);
        }

        if(is_fixed_point)
        {
            // Finalize and store the result
            *reinterpret_cast<typename VT::stype *>(out_it.ptr() + x) = finalize_quantization(in_value, multiplier, shift, offset,
                                                                                              static_cast<typename VT::stype>(min_bound),
                                                                                              static_cast<typename VT::stype>(max_bound), is_bounded_relu);
        }
        else
        {
            // Finalize quantization
            in_value = (in_value * multiplier) >> shift;

            // Bound and store the result
            if(is_bounded_relu)
            {
                in_value = static_cast<typename VT::stype>(std::max<int32_t>(min_bound, std::min<int32_t>(max_bound, in_value)));
            }
            *reinterpret_cast<typename VT::stype *>(out_it.ptr() + x) = static_cast<typename VT::stype>(std::max<int32_t>(static_cast<int32_t>(std::numeric_limits<typename VT::stype>::lowest()),
                                                                                                                          std::min<int32_t>(static_cast<int32_t>(std::numeric_limits<typename VT::stype>::max()), in_value)));
        }
    }
}

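// Per-channel (symmetric weights) variant of the window function: the b_offset and
// k_offset contributions do not apply here (the symmetric runner below ignores
// vector_sum_row, b_offset and k_offset), so only the a_offset term and the optional bias
// are added before requantizing with per-output-channel multipliers and shifts.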
inline void run_offset_contribution_output_stage_window_symm(const int32_t *vector_sum_col_ptr, const int32_t *bias_ptr, Iterator mm_result_it, Iterator out_it,
                                                             const int32_t *result_multipliers, const int32_t *result_shifts,
                                                             const int32x4_t result_offset, int8x16_t min_s8, int8x16_t max_s8,
                                                             int32_t a_offset, int32_t offset, int32_t min_bound, int32_t max_bound,
                                                             int window_step_x, int window_start_x, int window_end_x, bool has_a_offset, bool has_bias, bool is_bounded_relu, bool is_fixed_point)
{
    int32x4x4_t offset_term_s32 = { 0, 0, 0, 0 };
    if(!is_fixed_point)
    {
        // Combine quantization offset with other offsets.
        offset_term_s32 = add_s32(offset_term_s32, result_offset);
    }

    int x = window_start_x;
    for(; x <= (window_end_x - window_step_x); x += window_step_x)
    {
        int32x4x4_t in_s32 = load_results_input(mm_result_it, x);

        if(has_a_offset)
        {
            in_s32 = add_s32(in_s32, get_a_offset(vector_sum_col_ptr, a_offset, x));
        }
        if(has_bias)
        {
            in_s32 = add_s32(in_s32, load(bias_ptr, x));
        }
        if(!is_fixed_point)
        {
            in_s32 = add_s32(in_s32, offset_term_s32);
            in_s32 = mul_s32(in_s32, result_multipliers + x);
        }

        if(is_fixed_point)
        {
            vst1q_s8(reinterpret_cast<int8_t *>(out_it.ptr() + x), finalize_quantization_symm(in_s32, load(result_multipliers, x), load(result_shifts, x), result_offset, min_s8, max_s8, is_bounded_relu));
        }
        else
        {
            vst1q_s8(reinterpret_cast<int8_t *>(out_it.ptr() + x), finalize_quantization_floating_point(in_s32, load(result_shifts, x), min_s8, max_s8, is_bounded_relu));
        }
    }
    // Compute left-over elements
    for(; x < window_end_x; ++x)
    {
        int32_t in_value = *(reinterpret_cast<const int32_t *>(mm_result_it.ptr()) + x) + wrapper::vgetlane(offset_term_s32.val[0], 0);

        if(has_a_offset)
        {
            in_value += (*(vector_sum_col_ptr + x) * a_offset);
        }
        if(has_bias)
        {
            in_value += *(bias_ptr + x);
        }

        if(is_fixed_point)
        {
            // Finalize and store the result
            *(out_it.ptr() + x) = finalize_quantization(in_value, result_multipliers[x], result_shifts[x], offset, static_cast<int8_t>(min_bound), static_cast<int8_t>(max_bound), is_bounded_relu);
        }
        else
        {
            // Finalize quantization
            in_value = (in_value * result_multipliers[x]) >> (-result_shifts[x]);

            // Bound and store the result
            if(is_bounded_relu)
            {
                in_value = static_cast<int8_t>(std::max<int32_t>(min_bound, std::min<int32_t>(max_bound, in_value)));
            }
            *(out_it.ptr() + x) = static_cast<int8_t>(std::max<int32_t>(-128, std::min<int32_t>(127, in_value)));
        }
    }
}

template <typename T>
void run_offset_contribution_output_stage(const Window &window,
                                          const ITensor *mm_result, const ITensor *vector_sum_col, const ITensor *vector_sum_row, const ITensor *bias, ITensor *output,
                                          int32_t a_offset, int32_t b_offset, int32_t k_offset, bool is_vector_sum_col_batched,
                                          GEMMLowpOutputStageInfo output_stage, bool is_gemm3d, bool is_bounded_relu, bool is_fixed_point)
{
    //  Semantics of XYZW Explained for each tensor
    //
    //  | Tensor            |    XYZW when is_gemm3d == false       |    XYZW when is_gemm3d == true                    |
    // -------------------------------------------------------------------------------------------------------------------
    //  | mm_result         |  x -> width,  y -> height, z -> batch |  x -> width, y -> height, z -> depth, w -> batch  |
    //  | collapsed window  |  x -> width,  y -> height, z -> batch |  x -> width, y -> height, z -> depth * batch      |
    //  | vector_sum_row    |  x -> height, y -> batch              |  x -> height * depth, y -> batch                  |
    //  | vector_sum_col    |  x -> width,  y -> batch              |  x -> width, y -> batch                           |
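    //
    //  The window is collapsed over Z, so a single loop below covers depth * batch;
    //  batch_id and the position inside the 3D output are recovered from id.z() using
    //  depth_input and height_input.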

    using ExactTagType = typename wrapper::traits::neon_bitvector_tag_t<T, wrapper::traits::BitWidth::W128>;
    using Typer        = VectorTyper<T>;

    const int height_input = is_gemm3d ? mm_result->info()->dimension(1) : 0;
    const int depth_input  = is_gemm3d ? mm_result->info()->dimension(2) : 1;

    const int32_t multiplier = output_stage.gemmlowp_multiplier;
    const int32_t shift      = output_stage.gemmlowp_shift;
    const int32_t offset     = output_stage.gemmlowp_offset;
    const int32_t min_bound  = output_stage.gemmlowp_min_bound;
    const int32_t max_bound  = output_stage.gemmlowp_max_bound;

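    // For the QUANTIZE_DOWN path the shift is stored negated: vshlq_s32 with a negative
    // count performs an arithmetic right shift in finalize_quantization_floating_point().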
    const int32x4_t result_offset_s32 = vdupq_n_s32(offset);
    const int32x4_t result_shift_s32  = vdupq_n_s32(is_fixed_point ? shift : -shift);
    const auto      min_vec           = wrapper::vdup_n(static_cast<T>(min_bound), ExactTagType{});
    const auto      max_vec           = wrapper::vdup_n(static_cast<T>(max_bound), ExactTagType{});

    const int  window_step_x  = 16;
    const auto window_start_x = static_cast<int>(window.x().start());
    const auto window_end_x   = static_cast<int>(window.x().end());

    Window win(window);
    win.set(Window::DimX, Window::Dimension(0, 1, 1));

    Window collapsed_window = win.collapse_if_possible(win, Window::DimZ);

    Iterator mm_result_it(mm_result, win);
    Iterator out_it(output, win);

    if((a_offset != 0) && (b_offset != 0))
    {
        ARM_COMPUTE_ERROR_ON_NULLPTR(vector_sum_col);
        ARM_COMPUTE_ERROR_ON_NULLPTR(vector_sum_row);

        Iterator vector_sum_col_it = get_vector_sum_col_it(collapsed_window, vector_sum_col);
        Iterator vector_sum_row_it = get_vector_sum_row_it(collapsed_window, vector_sum_row);

        const size_t sum_row_stride_y = vector_sum_row->info()->strides_in_bytes().y();

        // Offset in case vector_sum_col is batched in y dimension
        const int vector_sum_col_stride_batch = is_vector_sum_col_batched ? vector_sum_col->info()->strides_in_bytes().y() : 0;

        if(bias != nullptr)
        {
            Iterator bias_it = get_bias_it(collapsed_window, bias);
            execute_window_loop(collapsed_window, [&](const Coordinates & id)
            {
                const int  batch_id           = id.z() / depth_input;
                const auto vector_sum_col_ptr = reinterpret_cast<const int32_t *>(vector_sum_col_it.ptr() + batch_id * vector_sum_col_stride_batch);
                const auto vector_sum_row_ptr = reinterpret_cast<const int32_t *>(vector_sum_row_it.ptr() + batch_id * sum_row_stride_y)
                                                + id.y() + (id.z() % depth_input) * height_input;
                run_offset_contribution_output_stage_window<Typer>(vector_sum_col_ptr, vector_sum_row_ptr, reinterpret_cast<const int32_t *>(bias_it.ptr()),
                                                                   mm_result_it,
                                                                   out_it,
                                                                   result_offset_s32, result_shift_s32,
                                                                   min_vec, max_vec, a_offset, b_offset, k_offset,
                                                                   multiplier, shift, offset, min_bound, max_bound,
                                                                   window_step_x, window_start_x, window_end_x, true, true, true, is_bounded_relu, is_fixed_point);
            },
            vector_sum_col_it, vector_sum_row_it, bias_it, mm_result_it, out_it);
        }
        else
        {
            execute_window_loop(collapsed_window, [&](const Coordinates & id)
            {
                const int  batch_id           = id.z() / depth_input;
                const auto vector_sum_col_ptr = reinterpret_cast<const int32_t *>(vector_sum_col_it.ptr() + batch_id * vector_sum_col_stride_batch);
                const auto vector_sum_row_ptr = reinterpret_cast<const int32_t *>(vector_sum_row_it.ptr() + batch_id * sum_row_stride_y)
                                                + id.y() + (id.z() % depth_input) * height_input;
                run_offset_contribution_output_stage_window<Typer>(vector_sum_col_ptr, vector_sum_row_ptr, nullptr, mm_result_it, out_it,
                                                                   result_offset_s32, result_shift_s32,
                                                                   min_vec, max_vec, a_offset, b_offset, k_offset,
                                                                   multiplier, shift, offset, min_bound, max_bound,
                                                                   window_step_x, window_start_x, window_end_x, true, true, false, is_bounded_relu, is_fixed_point);
            },
            vector_sum_col_it, vector_sum_row_it, mm_result_it, out_it);
        }
    }
    else if((a_offset == 0) && (b_offset != 0))
    {
        ARM_COMPUTE_ERROR_ON_NULLPTR(vector_sum_row);

        Iterator vector_sum_row_it = get_vector_sum_row_it(collapsed_window, vector_sum_row);

        const size_t sum_row_stride_y = vector_sum_row->info()->strides_in_bytes().y();

        if(bias != nullptr)
        {
            Iterator bias_it = get_bias_it(collapsed_window, bias);
            execute_window_loop(collapsed_window, [&](const Coordinates & id)
            {
                const int  batch_id           = id.z() / depth_input;
                const auto vector_sum_row_ptr = reinterpret_cast<const int32_t *>(vector_sum_row_it.ptr() + batch_id * sum_row_stride_y)
                                                + id.y() + (id.z() % depth_input) * height_input;
                run_offset_contribution_output_stage_window<Typer>(nullptr, vector_sum_row_ptr, reinterpret_cast<const int32_t *>(bias_it.ptr()), mm_result_it,
                                                                   out_it,
                                                                   result_offset_s32, result_shift_s32,
                                                                   min_vec, max_vec, a_offset, b_offset, k_offset,
                                                                   multiplier, shift, offset, min_bound, max_bound,
                                                                   window_step_x, window_start_x, window_end_x, false, true, true, is_bounded_relu, is_fixed_point);
            },
            vector_sum_row_it, bias_it, mm_result_it, out_it);
        }
        else
        {
            execute_window_loop(collapsed_window, [&](const Coordinates & id)
            {
                const int  batch_id           = id.z() / depth_input;
                const auto vector_sum_row_ptr = reinterpret_cast<const int32_t *>(vector_sum_row_it.ptr() + batch_id * sum_row_stride_y)
                                                + id.y() + (id.z() % depth_input) * height_input;
                run_offset_contribution_output_stage_window<Typer>(nullptr, vector_sum_row_ptr, nullptr, mm_result_it, out_it,
                                                                   result_offset_s32, result_shift_s32,
                                                                   min_vec, max_vec, a_offset, b_offset, k_offset,
                                                                   multiplier, shift, offset, min_bound, max_bound,
                                                                   window_step_x, window_start_x, window_end_x, false, true, false, is_bounded_relu, is_fixed_point);
            },
            vector_sum_row_it, mm_result_it, out_it);
        }
    }
    else if((a_offset != 0) && (b_offset == 0))
    {
        ARM_COMPUTE_ERROR_ON_NULLPTR(vector_sum_col);

        Iterator vector_sum_col_it = get_vector_sum_col_it(collapsed_window, vector_sum_col);

        // Offset in case vector_sum_col is batched in y dimension
        const int vector_sum_col_stride_batch = is_vector_sum_col_batched ? vector_sum_col->info()->strides_in_bytes().y() : 0;

        if(bias != nullptr)
        {
            Iterator bias_it = get_bias_it(collapsed_window, bias);
            execute_window_loop(collapsed_window, [&](const Coordinates & id)
            {
                const int  batch_id           = id.z() / depth_input;
                const auto vector_sum_col_ptr = reinterpret_cast<const int32_t *>(vector_sum_col_it.ptr() + batch_id * vector_sum_col_stride_batch);
                run_offset_contribution_output_stage_window<Typer>(vector_sum_col_ptr, nullptr, reinterpret_cast<const int32_t *>(bias_it.ptr()), mm_result_it,
                                                                   out_it,
                                                                   result_offset_s32, result_shift_s32,
                                                                   min_vec, max_vec, a_offset, b_offset, k_offset,
                                                                   multiplier, shift, offset, min_bound, max_bound,
                                                                   window_step_x, window_start_x, window_end_x, true, false, true, is_bounded_relu, is_fixed_point);
            },
            vector_sum_col_it, bias_it, mm_result_it, out_it);
        }
        else
        {
            execute_window_loop(collapsed_window, [&](const Coordinates & id)
            {
                const int  batch_id           = id.z() / depth_input;
                const auto vector_sum_col_ptr = reinterpret_cast<const int32_t *>(vector_sum_col_it.ptr() + batch_id * vector_sum_col_stride_batch);
                run_offset_contribution_output_stage_window<Typer>(vector_sum_col_ptr, nullptr, nullptr, mm_result_it, out_it,
                                                                   result_offset_s32, result_shift_s32,
                                                                   min_vec, max_vec, a_offset, b_offset, k_offset,
                                                                   multiplier, shift, offset, min_bound, max_bound,
                                                                   window_step_x, window_start_x, window_end_x, true, false, false, is_bounded_relu, is_fixed_point);
            },
            vector_sum_col_it, mm_result_it, out_it);
        }
    }
    else
    {
        if(bias != nullptr)
        {
            Iterator bias_it = get_bias_it(collapsed_window, bias);
            execute_window_loop(collapsed_window, [&](const Coordinates &)
            {
                run_offset_contribution_output_stage_window<Typer>(nullptr, nullptr, reinterpret_cast<const int32_t *>(bias_it.ptr()), mm_result_it, out_it,
                                                                   result_offset_s32, result_shift_s32,
                                                                   min_vec, max_vec, a_offset, b_offset, k_offset,
                                                                   multiplier, shift, offset, min_bound, max_bound,
                                                                   window_step_x, window_start_x, window_end_x, false, false, true, is_bounded_relu, is_fixed_point);
            },
            bias_it, mm_result_it, out_it);
        }
        else
        {
            execute_window_loop(collapsed_window, [&](const Coordinates &)
            {
                run_offset_contribution_output_stage_window<Typer>(nullptr, nullptr, nullptr, mm_result_it, out_it,
                                                                   result_offset_s32, result_shift_s32,
                                                                   min_vec, max_vec, a_offset, b_offset, k_offset,
                                                                   multiplier, shift, offset, min_bound, max_bound,
                                                                   window_step_x, window_start_x, window_end_x, false, false, false, is_bounded_relu, is_fixed_point);
            },
            mm_result_it, out_it);
        }
        return;
    }
}

void run_offset_contribution_output_stage_symm(const Window &window,
                                               const ITensor *mm_result, const ITensor *vector_sum_col, const ITensor *vector_sum_row, const ITensor *bias, ITensor *output,
                                               int32_t a_offset, int32_t b_offset, int32_t k_offset, bool is_vector_sum_col_batched,
                                               GEMMLowpOutputStageInfo output_stage, bool is_gemm3d, bool is_bounded_relu, bool is_fixed_point)
{
    ARM_COMPUTE_UNUSED(vector_sum_row, b_offset, k_offset);

    const int depth_input = is_gemm3d ? mm_result->info()->dimension(2) : 1;

    const int32_t offset    = output_stage.gemmlowp_offset;
    const int32_t min_bound = output_stage.gemmlowp_min_bound;
    const int32_t max_bound = output_stage.gemmlowp_max_bound;

    const int32_t *result_multipliers = output_stage.gemmlowp_multipliers.data();
    const int32_t *result_shifts      = output_stage.gemmlowp_shifts.data();
    const int32x4_t result_offset_s32  = vdupq_n_s32(offset);
    const int8x16_t min_s8             = vdupq_n_s8(static_cast<int8_t>(min_bound));
    const int8x16_t max_s8             = vdupq_n_s8(static_cast<int8_t>(max_bound));

    const int  window_step_x  = 16;
    const auto window_start_x = static_cast<int>(window.x().start());
    const auto window_end_x   = static_cast<int>(window.x().end());

    Window win(window);
    win.set(Window::DimX, Window::Dimension(0, 1, 1));

    Window collapsed_window = win.collapse_if_possible(win, Window::DimZ);

    Iterator mm_result_it(mm_result, win);
    Iterator out_it(output, win);

    if(a_offset != 0)
    {
        ARM_COMPUTE_ERROR_ON_NULLPTR(vector_sum_col);

        Iterator vector_sum_col_it = get_vector_sum_col_it(collapsed_window, vector_sum_col);

        // Offset in case vector_sum_col is batched in y dimension
        const int vector_sum_col_stride_batch = is_vector_sum_col_batched ? vector_sum_col->info()->strides_in_bytes().y() : 0;

        if(bias != nullptr)
        {
            Iterator bias_it = get_bias_it(collapsed_window, bias);
            execute_window_loop(collapsed_window, [&](const Coordinates & id)
            {
                const int  batch_id           = id.z() / depth_input;
                const auto vector_sum_col_ptr = reinterpret_cast<const int32_t *>(vector_sum_col_it.ptr() + batch_id * vector_sum_col_stride_batch);
                run_offset_contribution_output_stage_window_symm(vector_sum_col_ptr, reinterpret_cast<const int32_t *>(bias_it.ptr()), mm_result_it, out_it,
                                                                 result_multipliers, result_shifts,
                                                                 result_offset_s32, min_s8, max_s8,
                                                                 a_offset, offset, min_bound, max_bound,
                                                                 window_step_x, window_start_x, window_end_x, true, true, is_bounded_relu, is_fixed_point);
            },
            vector_sum_col_it, bias_it, mm_result_it, out_it);
        }
        else
        {
            execute_window_loop(collapsed_window, [&](const Coordinates & id)
            {
                const int  batch_id           = id.z() / depth_input;
                const auto vector_sum_col_ptr = reinterpret_cast<const int32_t *>(vector_sum_col_it.ptr() + batch_id * vector_sum_col_stride_batch);
                run_offset_contribution_output_stage_window_symm(vector_sum_col_ptr, nullptr, mm_result_it, out_it,
                                                                 result_multipliers, result_shifts,
                                                                 result_offset_s32, min_s8, max_s8,
                                                                 a_offset, offset, min_bound, max_bound,
                                                                 window_step_x, window_start_x, window_end_x, true, false, is_bounded_relu, is_fixed_point);
            },
            vector_sum_col_it, mm_result_it, out_it);
        }
    }
    else
    {
        if(bias != nullptr)
        {
            Iterator bias_it = get_bias_it(collapsed_window, bias);
            execute_window_loop(collapsed_window, [&](const Coordinates &)
            {
                run_offset_contribution_output_stage_window_symm(nullptr, reinterpret_cast<const int32_t *>(bias_it.ptr()), mm_result_it, out_it,
                                                                 result_multipliers, result_shifts,
                                                                 result_offset_s32, min_s8, max_s8,
                                                                 a_offset, offset, min_bound, max_bound,
                                                                 window_step_x, window_start_x, window_end_x, false, true, is_bounded_relu, is_fixed_point);
            },
            bias_it, mm_result_it, out_it);
        }
        else
        {
            execute_window_loop(collapsed_window, [&](const Coordinates &)
            {
                run_offset_contribution_output_stage_window_symm(nullptr, nullptr, mm_result_it, out_it,
                                                                 result_multipliers, result_shifts,
                                                                 result_offset_s32, min_s8, max_s8,
                                                                 a_offset, offset, min_bound, max_bound,
                                                                 window_step_x, window_start_x, window_end_x, false, false, is_bounded_relu, is_fixed_point);
            },
            mm_result_it, out_it);
        }
        return;
    }
}

Status validate_arguments(const ITensorInfo *mm_result, const ITensorInfo *vector_sum_col, const ITensorInfo *vector_sum_row, const ITensorInfo *bias, const ITensorInfo *output,
                          int32_t a_offset, int32_t b_offset, GEMMLowpOutputStageInfo output_stage)
{
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(mm_result, 1, DataType::S32);
    if(output->data_type() != DataType::QASYMM8)
    {
        ARM_COMPUTE_RETURN_ERROR_ON(mm_result->dimension(0) > 1 && output_stage.gemmlowp_multipliers.size() > 1 && b_offset != 0);
    }
    ARM_COMPUTE_RETURN_ERROR_ON(output_stage.gemmlowp_min_bound > output_stage.gemmlowp_max_bound);
    ARM_COMPUTE_RETURN_ERROR_ON(output_stage.type != GEMMLowpOutputStageType::QUANTIZE_DOWN && output_stage.type != GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT);

    if(bias != nullptr)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(bias, 1, DataType::S32);
        ARM_COMPUTE_RETURN_ERROR_ON(bias->num_dimensions() > 1);
        ARM_COMPUTE_RETURN_ERROR_ON(mm_result->dimension(0) != bias->dimension(0));
    }

    // If a_offset == 0, vector_sum_col can be a nullptr
    if(a_offset != 0)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(vector_sum_col, 1, DataType::S32);
        ARM_COMPUTE_RETURN_ERROR_ON(vector_sum_col->dimension(0) != mm_result->dimension(0));
        ARM_COMPUTE_RETURN_ERROR_ON(vector_sum_col->num_dimensions() > 2);
    }

    // If b_offset == 0, vector_sum_row can be a nullptr
    if(b_offset != 0)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(vector_sum_row, 1, DataType::S32);

        // Check if input is a 3D reinterpretation
        const bool reinterpret_as_3d = mm_result->num_dimensions() > 1 && mm_result->tensor_shape().y() != vector_sum_row->tensor_shape().x();

        // Validate input
        ARM_COMPUTE_RETURN_ERROR_ON(reinterpret_as_3d && vector_sum_row->dimension(0) != (mm_result->dimension(1) * mm_result->dimension(2)));
        ARM_COMPUTE_RETURN_ERROR_ON(!reinterpret_as_3d && vector_sum_row->dimension(0) != mm_result->dimension(1));

        TensorShape output_shape = output->tensor_shape();
        if(output_shape.num_dimensions() > 1)
        {
            const unsigned int output_batch_idx = reinterpret_as_3d ? 3 : 2;

            TensorShape vector_sum_row_shape = vector_sum_row->tensor_shape();
            vector_sum_row_shape.collapse_from(1);
            output_shape.collapse_from(output_batch_idx);

            ARM_COMPUTE_RETURN_ERROR_ON_MSG(vector_sum_row_shape[1] != output_shape[output_batch_idx],
                                            "mm_result tensor must have the same number of batches as the output tensor");

            if(a_offset != 0)
            {
                TensorShape vector_sum_col_shape = vector_sum_col->tensor_shape();
                vector_sum_col_shape.collapse_from(1);

                ARM_COMPUTE_RETURN_ERROR_ON_MSG(vector_sum_col_shape[1] != 1 && vector_sum_col_shape[1] != vector_sum_row_shape[1],
                                                "vector_sum_col tensor must have the same number of batches as vector_sum_row or the number of batches must be set to 1");
            }
        }

        // Check Tensor Rank of vector_sum_row
        ARM_COMPUTE_RETURN_ERROR_ON(vector_sum_row->num_dimensions() > 3);
    }

    if(output->total_size() != 0)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED);
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(mm_result, output);
    }

    return Status{};
}
} // namespace

void CpuGemmLowpOffsetContributionOutputStageKernel::configure(const ITensorInfo *mm_result, const ITensorInfo *vector_sum_col,
                                                               const ITensorInfo *vector_sum_row, const ITensorInfo *bias, ITensorInfo *dst,
                                                               int32_t k, int32_t a_offset, int32_t b_offset,
                                                               GEMMLowpOutputStageInfo output_stage)
{
    ARM_COMPUTE_UNUSED(vector_sum_row, bias);
    // Perform validate step
    ARM_COMPUTE_ERROR_ON_NULLPTR(mm_result, dst);
    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(mm_result, vector_sum_col, vector_sum_row, bias, dst, a_offset, b_offset, output_stage));

    _a_offset     = a_offset;
    _b_offset     = b_offset;
    _k_offset     = a_offset * b_offset * k;
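    // _k_offset is the constant term of the offset contribution (a_offset * b_offset * k),
    // added once per output element when both quantization offsets are non-zero.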
    _output_stage = output_stage;

    // If a_offset == 0, vector_sum_col can be a nullptr
    if(a_offset != 0)
    {
        // Check if vector_sum_col_shape should be slid or not
        // Don't slide vector_sum_col_shape along the y dimension if vector_sum_col_shape has just 1 dimension and vector_sum_row_shape has more than 1
        // This scenario can happen when the matrix multiplication is used to perform a convolution operation
        _is_vector_sum_col_batched = vector_sum_col->tensor_shape().num_dimensions() > 1;
    }

    // Output auto-initialization if not yet initialized
    auto_init_if_empty(*dst, mm_result->clone()->set_data_type(DataType::QASYMM8));

    // Configure kernel window
    Window win = calculate_max_window(*mm_result, Steps());

    // Note: This kernel performs 16 elements per iteration.
    // However, since a left-over for loop is used, no out-of-bounds reads or writes can occur.
    // For this reason num_elems_processed_per_iteration is 1 and so update_window_and_padding() can be skipped
    ICpuKernel::configure(win);
}

Status CpuGemmLowpOffsetContributionOutputStageKernel::validate(const ITensorInfo *mm_result, const ITensorInfo *vector_sum_col,
                                                                const ITensorInfo *vector_sum_row, const ITensorInfo *bias, const ITensorInfo *output,
                                                                int32_t a_offset, int32_t b_offset, GEMMLowpOutputStageInfo output_stage)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(mm_result, output);
    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(mm_result, vector_sum_col, vector_sum_row, bias, output, a_offset, b_offset, output_stage));
    return Status{};
}

void CpuGemmLowpOffsetContributionOutputStageKernel::run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info)
{
    ARM_COMPUTE_UNUSED(info);
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICpuKernel::window(), window);

    auto mm_result      = tensors.get_const_tensor(TensorType::ACL_SRC_0);
    auto vector_sum_col = tensors.get_const_tensor(TensorType::ACL_SRC_1);
    auto vector_sum_row = tensors.get_const_tensor(TensorType::ACL_SRC_2);
    auto bias           = tensors.get_const_tensor(TensorType::ACL_SRC_3);
    auto dst            = tensors.get_tensor(TensorType::ACL_DST);

    PixelValue type_min{};
    PixelValue type_max{};
    std::tie(type_min, type_max) = get_min_max(dst->info()->data_type());
    int32_t type_min_int = type_min.get<int32_t>();
    int32_t type_max_int = type_max.get<int32_t>();

    const bool reinterpret_as_3d = vector_sum_row != nullptr
                                   && mm_result->info()->num_dimensions() > 1
                                   && mm_result->info()->tensor_shape().y() != vector_sum_row->info()->tensor_shape().x();

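    // Clamp in the output stage only when the requested bounds are tighter than the
    // natural range of the output data type.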
    const bool is_bounded_relu = !(_output_stage.gemmlowp_min_bound <= type_min_int && _output_stage.gemmlowp_max_bound >= type_max_int);

    // Check if we need to perform fixed point requantization
    const bool is_fixed_point = _output_stage.type != GEMMLowpOutputStageType::QUANTIZE_DOWN;

    // Check if the output is signed (QASYMM8_SIGNED)
    const bool is_signed = dst->info()->data_type() == DataType::QASYMM8_SIGNED;

    // Check if symmetric per-channel execution
    const bool is_symm = _output_stage.is_quantized_per_channel;

    if(is_symm)
    {
        run_offset_contribution_output_stage_symm(window, mm_result, vector_sum_col, vector_sum_row, bias, dst, _a_offset, _b_offset, _k_offset, _is_vector_sum_col_batched, _output_stage,
                                                  reinterpret_as_3d, is_bounded_relu, is_fixed_point);
    }
    else
    {
        if(is_signed)
        {
            run_offset_contribution_output_stage<int8_t>(window, mm_result, vector_sum_col, vector_sum_row, bias, dst, _a_offset, _b_offset, _k_offset, _is_vector_sum_col_batched, _output_stage,
                                                         reinterpret_as_3d, is_bounded_relu, is_fixed_point);
        }
        else
        {
            run_offset_contribution_output_stage<uint8_t>(window, mm_result, vector_sum_col, vector_sum_row, bias, dst, _a_offset, _b_offset, _k_offset, _is_vector_sum_col_batched, _output_stage,
                                                          reinterpret_as_3d, is_bounded_relu, is_fixed_point);
        }
    }
}

const char *CpuGemmLowpOffsetContributionOutputStageKernel::name() const
{
    return "CpuGemmLowpOffsetContributionOutputStageKernel";
}
} // namespace kernels
} // namespace cpu
} // namespace arm_compute