/*
 * Copyright (c) 2023, Alliance for Open Media. All rights reserved.
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#include <arm_neon.h>
#include <assert.h>

#include "config/aom_dsp_rtcd.h"
#include "config/aom_config.h"

#include "aom_dsp/arm/mem_neon.h"
#include "aom_dsp/arm/sum_neon.h"

static inline void get_blk_sse_sum_4xh_neon(const int16_t *data, int stride,
                                            int bh, int *x_sum,
                                            int64_t *x2_sum) {
  int i = bh;
  int32x4_t sum = vdupq_n_s32(0);
  int32x4_t sse = vdupq_n_s32(0);

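  // Each row holds only four 16-bit elements, so process two rows per
  // iteration by combining them into a single 128-bit vector. bh is even for
  // every 4-wide block size (4x4, 4x8, 4x16), so the decrement by 2 is safe.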
  do {
    int16x8_t d = vcombine_s16(vld1_s16(data), vld1_s16(data + stride));

    sum = vpadalq_s16(sum, d);

    sse = vmlal_s16(sse, vget_low_s16(d), vget_low_s16(d));
    sse = vmlal_s16(sse, vget_high_s16(d), vget_high_s16(d));

    data += 2 * stride;
    i -= 2;
  } while (i != 0);

  *x_sum = horizontal_add_s32x4(sum);
  *x2_sum = horizontal_long_add_s32x4(sse);
}

static inline void get_blk_sse_sum_8xh_neon(const int16_t *data, int stride,
                                            int bh, int *x_sum,
                                            int64_t *x2_sum) {
  int i = bh;
  int32x4_t sum = vdupq_n_s32(0);
  int32x4_t sse = vdupq_n_s32(0);

  // Input is 12-bit wide, so we can add up to 127 squared elements into a
  // signed 32-bit element. Since we accumulate into an int32x4_t and the
  // maximum value of bh is 32, we don't have to worry about sse overflowing.
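  // Concretely (assuming the nominal 12-bit range above, i.e. |x| <= 4095):
  // 4095 * 4095 is just under 2^24, so 127 such squares fit below 2^31 - 1.
  // Each row adds two squares per 32-bit lane (8 elements across 4 lanes),
  // giving at most 64 squares per lane for bh = 32, well within that bound.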

  do {
    int16x8_t d = vld1q_s16(data);

    sum = vpadalq_s16(sum, d);

    sse = vmlal_s16(sse, vget_low_s16(d), vget_low_s16(d));
    sse = vmlal_s16(sse, vget_high_s16(d), vget_high_s16(d));

    data += stride;
  } while (--i != 0);

  *x_sum = horizontal_add_s32x4(sum);
  *x2_sum = horizontal_long_add_s32x4(sse);
}

static inline void get_blk_sse_sum_large_neon(const int16_t *data, int stride,
                                              int bw, int bh, int *x_sum,
                                              int64_t *x2_sum) {
  int32x4_t sum = vdupq_n_s32(0);
  int64x2_t sse = vdupq_n_s64(0);

  // Input is 12-bit wide, so we can add up to 127 squared elements into a
  // signed 32-bit element. Since we accumulate into an int32x4_t vector, we
  // can process up to (127 * 4) / bw rows before we need to widen to 64 bits.
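  // Each row contributes bw / 4 squares per 32-bit lane (bw elements spread
  // over 4 lanes), so a per-lane budget of 127 squares allows (127 * 4) / bw
  // rows per chunk, e.g. 31 rows for bw = 16 and 15 rows for bw = 32.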

  int i_limit = (127 * 4) / bw;
  int i_tmp = bh > i_limit ? i_limit : bh;

  int i = 0;
  do {
    int32x4_t sse_s32 = vdupq_n_s32(0);
    do {
      int j = bw;
      const int16_t *data_ptr = data;
      do {
        int16x8_t d = vld1q_s16(data_ptr);

        sum = vpadalq_s16(sum, d);

        sse_s32 = vmlal_s16(sse_s32, vget_low_s16(d), vget_low_s16(d));
        sse_s32 = vmlal_s16(sse_s32, vget_high_s16(d), vget_high_s16(d));

        data_ptr += 8;
        j -= 8;
      } while (j != 0);

      data += stride;
      i++;
    } while (i < i_tmp && i < bh);

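    // Widen the per-chunk 32-bit sums of squares into the 64-bit accumulator
    // before the next chunk of rows could overflow them.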
    sse = vpadalq_s32(sse, sse_s32);
    i_tmp += i_limit;
  } while (i < bh);

  *x_sum = horizontal_add_s32x4(sum);
  *x2_sum = horizontal_add_s64x2(sse);
}

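// Example usage (illustrative sketch only; the residual buffer, block size
// and variable names below are hypothetical, not taken from libaom):
//
//   int16_t diff[16 * 16];  // bw x bh block of 16-bit residuals
//   int sum;
//   int64_t sse;
//   aom_get_blk_sse_sum_neon(diff, /*stride=*/16, /*bw=*/16, /*bh=*/16,
//                            &sum, &sse);
//   // sum is the sum of all 256 elements; sse is the sum of their squares.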
void aom_get_blk_sse_sum_neon(const int16_t *data, int stride, int bw, int bh,
                              int *x_sum, int64_t *x2_sum) {
  if (bw == 4) {
    get_blk_sse_sum_4xh_neon(data, stride, bh, x_sum, x2_sum);
  } else if (bw == 8) {
    get_blk_sse_sum_8xh_neon(data, stride, bh, x_sum, x2_sum);
  } else {
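    // Remaining block widths (16, 32, 64, 128) are all multiples of 8, so
    // each row can be processed eight elements at a time.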
    assert(bw % 8 == 0);
    get_blk_sse_sum_large_neon(data, stride, bw, bh, x_sum, x2_sum);
  }
}