1 /*
2 * Copyright (c) 2023 The WebM project authors. All Rights Reserved.
3 *
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
9 */
10
11 #include <arm_neon.h>
12
13 #include "./vpx_config.h"
14 #include "./vpx_dsp_rtcd.h"
15
16 #include "vpx/vpx_integer.h"
17 #include "vpx_dsp/arm/mem_neon.h"
18 #include "vpx_dsp/arm/sum_neon.h"
19
// Compute the SAD of one 4-wide, h-high high-bitdepth source block against
// each of four reference blocks simultaneously, writing one 32-bit SAD per
// reference to res[0..3]. Assumes h >= 1.
static INLINE void highbd_sad4xhx4d_neon(const uint8_t *src_ptr, int src_stride,
                                         const uint8_t *const ref_ptr[4],
                                         int ref_stride, uint32_t res[4],
                                         int h) {
  // High-bitdepth buffers are passed as uint8_t* but actually hold uint16_t
  // samples; CONVERT_TO_SHORTPTR recovers the real element pointers.
  const uint16_t *src16 = CONVERT_TO_SHORTPTR(src_ptr);
  const uint16_t *ref16_0 = CONVERT_TO_SHORTPTR(ref_ptr[0]);
  const uint16_t *ref16_1 = CONVERT_TO_SHORTPTR(ref_ptr[1]);
  const uint16_t *ref16_2 = CONVERT_TO_SHORTPTR(ref_ptr[2]);
  const uint16_t *ref16_3 = CONVERT_TO_SHORTPTR(ref_ptr[3]);

  uint32x4_t sum[4];
  sum[0] = vdupq_n_u32(0);
  sum[1] = vdupq_n_u32(0);
  sum[2] = vdupq_n_u32(0);
  sum[3] = vdupq_n_u32(0);

  int rows = h;
  do {
    const uint16x4_t s = vld1_u16(src16);
    // Widening absolute-difference-accumulate: sum += |s - ref| per lane.
    sum[0] = vabal_u16(sum[0], s, vld1_u16(ref16_0));
    sum[1] = vabal_u16(sum[1], s, vld1_u16(ref16_1));
    sum[2] = vabal_u16(sum[2], s, vld1_u16(ref16_2));
    sum[3] = vabal_u16(sum[3], s, vld1_u16(ref16_3));

    src16 += src_stride;
    ref16_0 += ref_stride;
    ref16_1 += ref_stride;
    ref16_2 += ref_stride;
    ref16_3 += ref_stride;
  } while (--rows != 0);

  // Reduce each of the four lane-wise accumulators to a scalar and store.
  vst1q_u32(res, horizontal_add_4d_uint32x4(sum));
}
50
// Compute the SAD of one 8-wide, h-high high-bitdepth source block against
// each of four reference blocks, writing one 32-bit SAD per reference to
// res[0..3]. Assumes h >= 1.
static INLINE void highbd_sad8xhx4d_neon(const uint8_t *src_ptr, int src_stride,
                                         const uint8_t *const ref_ptr[4],
                                         int ref_stride, uint32_t res[4],
                                         int h) {
  const uint16_t *src16 = CONVERT_TO_SHORTPTR(src_ptr);
  const uint16_t *ref16_0 = CONVERT_TO_SHORTPTR(ref_ptr[0]);
  const uint16_t *ref16_1 = CONVERT_TO_SHORTPTR(ref_ptr[1]);
  const uint16_t *ref16_2 = CONVERT_TO_SHORTPTR(ref_ptr[2]);
  const uint16_t *ref16_3 = CONVERT_TO_SHORTPTR(ref_ptr[3]);

  // Accumulate in 16-bit lanes; the supported block heights keep the
  // per-lane running total within uint16_t range before widening below.
  uint16x8_t sum[4];
  sum[0] = vdupq_n_u16(0);
  sum[1] = vdupq_n_u16(0);
  sum[2] = vdupq_n_u16(0);
  sum[3] = vdupq_n_u16(0);

  int rows = h;
  do {
    const uint16x8_t s = vld1q_u16(src16);

    sum[0] = vabaq_u16(sum[0], s, vld1q_u16(ref16_0));
    sum[1] = vabaq_u16(sum[1], s, vld1q_u16(ref16_1));
    sum[2] = vabaq_u16(sum[2], s, vld1q_u16(ref16_2));
    sum[3] = vabaq_u16(sum[3], s, vld1q_u16(ref16_3));

    src16 += src_stride;
    ref16_0 += ref_stride;
    ref16_1 += ref_stride;
    ref16_2 += ref_stride;
    ref16_3 += ref_stride;
  } while (--rows != 0);

  // Widen pairwise to 32-bit lanes, then reduce each accumulator to a scalar.
  uint32x4_t sum_u32[4];
  sum_u32[0] = vpaddlq_u16(sum[0]);
  sum_u32[1] = vpaddlq_u16(sum[1]);
  sum_u32[2] = vpaddlq_u16(sum[2]);
  sum_u32[3] = vpaddlq_u16(sum[3]);
  vst1q_u32(res, horizontal_add_4d_uint32x4(sum_u32));
}
82
// Accumulate the element-wise |src - ref| of one 8-lane row into *sad_sum,
// widening the 16-bit absolute differences pairwise into 32-bit lanes.
static INLINE void sad8_neon(uint16x8_t src, uint16x8_t ref,
                             uint32x4_t *const sad_sum) {
  *sad_sum = vpadalq_u16(*sad_sum, vabdq_u16(src, ref));
}
88
// Compute the SAD of one 16-wide, h-high high-bitdepth source block against
// each of four reference blocks, writing one 32-bit SAD per reference to
// res[0..3]. Each row is processed as two 8-lane halves, with separate
// lo/hi accumulators merged at the end. Assumes h >= 1.
static INLINE void highbd_sad16xhx4d_neon(const uint8_t *src_ptr,
                                          int src_stride,
                                          const uint8_t *const ref_ptr[4],
                                          int ref_stride, uint32_t res[4],
                                          int h) {
  const uint16_t *src16 = CONVERT_TO_SHORTPTR(src_ptr);
  const uint16_t *ref16_0 = CONVERT_TO_SHORTPTR(ref_ptr[0]);
  const uint16_t *ref16_1 = CONVERT_TO_SHORTPTR(ref_ptr[1]);
  const uint16_t *ref16_2 = CONVERT_TO_SHORTPTR(ref_ptr[2]);
  const uint16_t *ref16_3 = CONVERT_TO_SHORTPTR(ref_ptr[3]);

  uint32x4_t sum_lo[4] = { vdupq_n_u32(0), vdupq_n_u32(0), vdupq_n_u32(0),
                           vdupq_n_u32(0) };
  uint32x4_t sum_hi[4] = { vdupq_n_u32(0), vdupq_n_u32(0), vdupq_n_u32(0),
                           vdupq_n_u32(0) };

  int rows = h;
  do {
    // Left half of the row (elements 0..7).
    const uint16x8_t s0 = vld1q_u16(src16);
    sad8_neon(s0, vld1q_u16(ref16_0), &sum_lo[0]);
    sad8_neon(s0, vld1q_u16(ref16_1), &sum_lo[1]);
    sad8_neon(s0, vld1q_u16(ref16_2), &sum_lo[2]);
    sad8_neon(s0, vld1q_u16(ref16_3), &sum_lo[3]);

    // Right half of the row (elements 8..15).
    const uint16x8_t s1 = vld1q_u16(src16 + 8);
    sad8_neon(s1, vld1q_u16(ref16_0 + 8), &sum_hi[0]);
    sad8_neon(s1, vld1q_u16(ref16_1 + 8), &sum_hi[1]);
    sad8_neon(s1, vld1q_u16(ref16_2 + 8), &sum_hi[2]);
    sad8_neon(s1, vld1q_u16(ref16_3 + 8), &sum_hi[3]);

    src16 += src_stride;
    ref16_0 += ref_stride;
    ref16_1 += ref_stride;
    ref16_2 += ref_stride;
    ref16_3 += ref_stride;
  } while (--rows != 0);

  // Merge the half-row accumulators, then reduce each to a scalar.
  uint32x4_t sum[4];
  sum[0] = vaddq_u32(sum_lo[0], sum_hi[0]);
  sum[1] = vaddq_u32(sum_lo[1], sum_hi[1]);
  sum[2] = vaddq_u32(sum_lo[2], sum_hi[2]);
  sum[3] = vaddq_u32(sum_lo[3], sum_hi[3]);

  vst1q_u32(res, horizontal_add_4d_uint32x4(sum));
}
131
// Compute the SAD of one w-wide, h-high high-bitdepth source block against
// each of four reference blocks, writing one 32-bit SAD per reference to
// res[0..3]. The inner loop consumes 32 columns per iteration as four 8-lane
// chunks, so w must be a multiple of 32 (callers pass 32 or 64); h >= 1.
// Alternating chunks accumulate into sum_lo/sum_hi to lengthen dependency
// chains for better instruction-level parallelism.
static INLINE void highbd_sadwxhx4d_neon(const uint8_t *src_ptr, int src_stride,
                                         const uint8_t *const ref_ptr[4],
                                         int ref_stride, uint32_t res[4], int w,
                                         int h) {
  // Recover the real uint16_t sample pointers from the uint8_t* aliases.
  const uint16_t *src16_ptr = CONVERT_TO_SHORTPTR(src_ptr);
  const uint16_t *ref16_ptr0 = CONVERT_TO_SHORTPTR(ref_ptr[0]);
  const uint16_t *ref16_ptr1 = CONVERT_TO_SHORTPTR(ref_ptr[1]);
  const uint16_t *ref16_ptr2 = CONVERT_TO_SHORTPTR(ref_ptr[2]);
  const uint16_t *ref16_ptr3 = CONVERT_TO_SHORTPTR(ref_ptr[3]);

  uint32x4_t sum_lo[4] = { vdupq_n_u32(0), vdupq_n_u32(0), vdupq_n_u32(0),
                           vdupq_n_u32(0) };
  uint32x4_t sum_hi[4] = { vdupq_n_u32(0), vdupq_n_u32(0), vdupq_n_u32(0),
                           vdupq_n_u32(0) };
  uint32x4_t sum[4];

  int i = 0;
  do {
    int j = 0;
    do {
      uint16x8_t s0, s1, s2, s3;

      // Columns j..j+7 of row i, against all four references.
      s0 = vld1q_u16(src16_ptr + i * src_stride + j);
      sad8_neon(s0, vld1q_u16(ref16_ptr0 + i * ref_stride + j), &sum_lo[0]);
      sad8_neon(s0, vld1q_u16(ref16_ptr1 + i * ref_stride + j), &sum_lo[1]);
      sad8_neon(s0, vld1q_u16(ref16_ptr2 + i * ref_stride + j), &sum_lo[2]);
      sad8_neon(s0, vld1q_u16(ref16_ptr3 + i * ref_stride + j), &sum_lo[3]);

      // Columns j+8..j+15.
      s1 = vld1q_u16(src16_ptr + i * src_stride + j + 8);
      sad8_neon(s1, vld1q_u16(ref16_ptr0 + i * ref_stride + j + 8), &sum_hi[0]);
      sad8_neon(s1, vld1q_u16(ref16_ptr1 + i * ref_stride + j + 8), &sum_hi[1]);
      sad8_neon(s1, vld1q_u16(ref16_ptr2 + i * ref_stride + j + 8), &sum_hi[2]);
      sad8_neon(s1, vld1q_u16(ref16_ptr3 + i * ref_stride + j + 8), &sum_hi[3]);

      // Columns j+16..j+23.
      s2 = vld1q_u16(src16_ptr + i * src_stride + j + 16);
      sad8_neon(s2, vld1q_u16(ref16_ptr0 + i * ref_stride + j + 16),
                &sum_lo[0]);
      sad8_neon(s2, vld1q_u16(ref16_ptr1 + i * ref_stride + j + 16),
                &sum_lo[1]);
      sad8_neon(s2, vld1q_u16(ref16_ptr2 + i * ref_stride + j + 16),
                &sum_lo[2]);
      sad8_neon(s2, vld1q_u16(ref16_ptr3 + i * ref_stride + j + 16),
                &sum_lo[3]);

      // Columns j+24..j+31.
      s3 = vld1q_u16(src16_ptr + i * src_stride + j + 24);
      sad8_neon(s3, vld1q_u16(ref16_ptr0 + i * ref_stride + j + 24),
                &sum_hi[0]);
      sad8_neon(s3, vld1q_u16(ref16_ptr1 + i * ref_stride + j + 24),
                &sum_hi[1]);
      sad8_neon(s3, vld1q_u16(ref16_ptr2 + i * ref_stride + j + 24),
                &sum_hi[2]);
      sad8_neon(s3, vld1q_u16(ref16_ptr3 + i * ref_stride + j + 24),
                &sum_hi[3]);

      j += 32;
    } while (j < w);

  } while (++i < h);

  // Merge the interleaved accumulators, then reduce each to a scalar SAD.
  sum[0] = vaddq_u32(sum_lo[0], sum_hi[0]);
  sum[1] = vaddq_u32(sum_lo[1], sum_hi[1]);
  sum[2] = vaddq_u32(sum_lo[2], sum_hi[2]);
  sum[3] = vaddq_u32(sum_lo[3], sum_hi[3]);

  vst1q_u32(res, horizontal_add_4d_uint32x4(sum));
}
198
// 64-wide specialization: forwards to the generic width kernel with w = 64.
static INLINE void highbd_sad64xhx4d_neon(const uint8_t *src_ptr,
                                          int src_stride,
                                          const uint8_t *const ref_ptr[4],
                                          int ref_stride, uint32_t res[4],
                                          int h) {
  highbd_sadwxhx4d_neon(src_ptr, src_stride, ref_ptr, ref_stride, res, 64, h);
}
206
// 32-wide specialization: forwards to the generic width kernel with w = 32.
static INLINE void highbd_sad32xhx4d_neon(const uint8_t *src_ptr,
                                          int src_stride,
                                          const uint8_t *const ref_ptr[4],
                                          int ref_stride, uint32_t res[4],
                                          int h) {
  highbd_sadwxhx4d_neon(src_ptr, src_stride, ref_ptr, ref_stride, res, 32, h);
}
214
// Stamp out the public vpx_highbd_sad{w}x{h}x4d_neon() entry points (the
// names vpx_dsp_rtcd.h dispatches to), each delegating to the width-specific
// helper above with the fixed block height.
#define HBD_SAD_WXH_4D_NEON(w, h)                                           \
  void vpx_highbd_sad##w##x##h##x4d_neon(                                   \
      const uint8_t *src, int src_stride, const uint8_t *const ref_array[4], \
      int ref_stride, uint32_t sad_array[4]) {                              \
    highbd_sad##w##xhx4d_neon(src, src_stride, ref_array, ref_stride,       \
                              sad_array, (h));                              \
  }

HBD_SAD_WXH_4D_NEON(4, 4)
HBD_SAD_WXH_4D_NEON(4, 8)

HBD_SAD_WXH_4D_NEON(8, 4)
HBD_SAD_WXH_4D_NEON(8, 8)
HBD_SAD_WXH_4D_NEON(8, 16)

HBD_SAD_WXH_4D_NEON(16, 8)
HBD_SAD_WXH_4D_NEON(16, 16)
HBD_SAD_WXH_4D_NEON(16, 32)

HBD_SAD_WXH_4D_NEON(32, 16)
HBD_SAD_WXH_4D_NEON(32, 32)
HBD_SAD_WXH_4D_NEON(32, 64)

HBD_SAD_WXH_4D_NEON(64, 32)
HBD_SAD_WXH_4D_NEON(64, 64)

#undef HBD_SAD_WXH_4D_NEON
242
// Stamp out the "skip" variants vpx_highbd_sad_skip_{w}x{h}x4d_neon(): an
// approximate SAD that reads only every other row (strides doubled, height
// halved) and then doubles the four results to compensate. Used to speed up
// motion search where an exact SAD is not required.
#define HBD_SAD_SKIP_WXH_4D_NEON(w, h)                                      \
  void vpx_highbd_sad_skip_##w##x##h##x4d_neon(                             \
      const uint8_t *src, int src_stride, const uint8_t *const ref_array[4], \
      int ref_stride, uint32_t sad_array[4]) {                              \
    highbd_sad##w##xhx4d_neon(src, 2 * src_stride, ref_array, 2 * ref_stride, \
                              sad_array, ((h) >> 1));                       \
    sad_array[0] <<= 1;                                                     \
    sad_array[1] <<= 1;                                                     \
    sad_array[2] <<= 1;                                                     \
    sad_array[3] <<= 1;                                                     \
  }

HBD_SAD_SKIP_WXH_4D_NEON(4, 4)
HBD_SAD_SKIP_WXH_4D_NEON(4, 8)

HBD_SAD_SKIP_WXH_4D_NEON(8, 4)
HBD_SAD_SKIP_WXH_4D_NEON(8, 8)
HBD_SAD_SKIP_WXH_4D_NEON(8, 16)

HBD_SAD_SKIP_WXH_4D_NEON(16, 8)
HBD_SAD_SKIP_WXH_4D_NEON(16, 16)
HBD_SAD_SKIP_WXH_4D_NEON(16, 32)

HBD_SAD_SKIP_WXH_4D_NEON(32, 16)
HBD_SAD_SKIP_WXH_4D_NEON(32, 32)
HBD_SAD_SKIP_WXH_4D_NEON(32, 64)

HBD_SAD_SKIP_WXH_4D_NEON(64, 32)
HBD_SAD_SKIP_WXH_4D_NEON(64, 64)

#undef HBD_SAD_SKIP_WXH_4D_NEON
274