/*
 * Copyright (c) 2017, Alliance for Open Media. All rights reserved.
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */
#include <arm_neon.h>

#include "config/aom_config.h"
#include "config/av1_rtcd.h"

#include "av1/common/cfl.h"

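// Load eight u16 values from src + offset, subtract 'sub', and store the
// result as s16 at dst + offset.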
static inline void vldsubstq_s16(int16_t *dst, const uint16_t *src, int offset,
                                 int16x8_t sub) {
  vst1q_s16(dst + offset,
            vsubq_s16(vreinterpretq_s16_u16(vld1q_u16(src + offset)), sub));
}

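// Load two vectors of eight u16 values, from buf and buf + offset, and return
// their element-wise sum.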
static inline uint16x8_t vldaddq_u16(const uint16_t *buf, size_t offset) {
  return vaddq_u16(vld1q_u16(buf), vld1q_u16(buf + offset));
}

// Load half of a vector and duplicate it in the other half.
static inline uint8x8_t vldh_dup_u8(const uint8_t *ptr) {
  return vreinterpret_u8_u32(vld1_dup_u32((const uint32_t *)ptr));
}

// Store half of a vector.
static inline void vsth_u16(uint16_t *ptr, uint16x4_t val) {
  vst1_lane_u32((uint32_t *)ptr, vreinterpret_u32_u16(val), 0);
}

// Store half of a vector.
static inline void vsth_u8(uint8_t *ptr, uint8x8_t val) {
  vst1_lane_u32((uint32_t *)ptr, vreinterpret_u32_u8(val), 0);
}

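// Each subsampling routine below writes luma values scaled by 8 (Q3) into
// pred_buf_q3: 4:2:0 sums a 2x2 block and shifts left by 1, 4:2:2 sums a
// horizontal pair and shifts left by 2, and 4:4:4 shifts a single sample left
// by 3, so every output value is eight times the (average) luma for that
// chroma position.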
static void cfl_luma_subsampling_420_lbd_neon(const uint8_t *input,
                                              int input_stride,
                                              uint16_t *pred_buf_q3, int width,
                                              int height) {
  const uint16_t *end = pred_buf_q3 + (height >> 1) * CFL_BUF_LINE;
  const int luma_stride = input_stride << 1;
  do {
    if (width == 4) {
      const uint16x4_t top = vpaddl_u8(vldh_dup_u8(input));
      const uint16x4_t sum = vpadal_u8(top, vldh_dup_u8(input + input_stride));
      vsth_u16(pred_buf_q3, vshl_n_u16(sum, 1));
    } else if (width == 8) {
      const uint16x4_t top = vpaddl_u8(vld1_u8(input));
      const uint16x4_t sum = vpadal_u8(top, vld1_u8(input + input_stride));
      vst1_u16(pred_buf_q3, vshl_n_u16(sum, 1));
    } else if (width == 16) {
      const uint16x8_t top = vpaddlq_u8(vld1q_u8(input));
      const uint16x8_t sum = vpadalq_u8(top, vld1q_u8(input + input_stride));
      vst1q_u16(pred_buf_q3, vshlq_n_u16(sum, 1));
    } else {
      const uint8x8x4_t top = vld4_u8(input);
      const uint8x8x4_t bot = vld4_u8(input + input_stride);
      // equivalent to a vpaddlq_u8 (because vld4 interleaves)
      const uint16x8_t top_0 = vaddl_u8(top.val[0], top.val[1]);
      // equivalent to a vpaddlq_u8 (because vld4 interleaves)
      const uint16x8_t bot_0 = vaddl_u8(bot.val[0], bot.val[1]);
      // equivalent to a vpaddlq_u8 (because vld4 interleaves)
      const uint16x8_t top_1 = vaddl_u8(top.val[2], top.val[3]);
      // equivalent to a vpaddlq_u8 (because vld4 interleaves)
      const uint16x8_t bot_1 = vaddl_u8(bot.val[2], bot.val[3]);
      uint16x8x2_t sum;
      sum.val[0] = vshlq_n_u16(vaddq_u16(top_0, bot_0), 1);
      sum.val[1] = vshlq_n_u16(vaddq_u16(top_1, bot_1), 1);
      vst2q_u16(pred_buf_q3, sum);
    }
    input += luma_stride;
  } while ((pred_buf_q3 += CFL_BUF_LINE) < end);
}

static void cfl_luma_subsampling_422_lbd_neon(const uint8_t *input,
                                              int input_stride,
                                              uint16_t *pred_buf_q3, int width,
                                              int height) {
  const uint16_t *end = pred_buf_q3 + height * CFL_BUF_LINE;
  do {
    if (width == 4) {
      const uint16x4_t top = vpaddl_u8(vldh_dup_u8(input));
      vsth_u16(pred_buf_q3, vshl_n_u16(top, 2));
    } else if (width == 8) {
      const uint16x4_t top = vpaddl_u8(vld1_u8(input));
      vst1_u16(pred_buf_q3, vshl_n_u16(top, 2));
    } else if (width == 16) {
      const uint16x8_t top = vpaddlq_u8(vld1q_u8(input));
      vst1q_u16(pred_buf_q3, vshlq_n_u16(top, 2));
    } else {
      const uint8x8x4_t top = vld4_u8(input);
      uint16x8x2_t sum;
      // vaddl_u8 is equivalent to a vpaddlq_u8 (because vld4 interleaves)
      sum.val[0] = vshlq_n_u16(vaddl_u8(top.val[0], top.val[1]), 2);
      sum.val[1] = vshlq_n_u16(vaddl_u8(top.val[2], top.val[3]), 2);
      vst2q_u16(pred_buf_q3, sum);
    }
    input += input_stride;
  } while ((pred_buf_q3 += CFL_BUF_LINE) < end);
}

static void cfl_luma_subsampling_444_lbd_neon(const uint8_t *input,
                                              int input_stride,
                                              uint16_t *pred_buf_q3, int width,
                                              int height) {
  const uint16_t *end = pred_buf_q3 + height * CFL_BUF_LINE;
  do {
    if (width == 4) {
      const uint16x8_t top = vshll_n_u8(vldh_dup_u8(input), 3);
      vst1_u16(pred_buf_q3, vget_low_u16(top));
    } else if (width == 8) {
      const uint16x8_t top = vshll_n_u8(vld1_u8(input), 3);
      vst1q_u16(pred_buf_q3, top);
    } else {
      const uint8x16_t top = vld1q_u8(input);
      vst1q_u16(pred_buf_q3, vshll_n_u8(vget_low_u8(top), 3));
      vst1q_u16(pred_buf_q3 + 8, vshll_n_u8(vget_high_u8(top), 3));
      if (width == 32) {
        const uint8x16_t next_top = vld1q_u8(input + 16);
        vst1q_u16(pred_buf_q3 + 16, vshll_n_u8(vget_low_u8(next_top), 3));
        vst1q_u16(pred_buf_q3 + 24, vshll_n_u8(vget_high_u8(next_top), 3));
      }
    }
    input += input_stride;
  } while ((pred_buf_q3 += CFL_BUF_LINE) < end);
}

#if CONFIG_AV1_HIGHBITDEPTH
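// vpaddq_u16 is only available as an intrinsic on AArch64, so provide an
// equivalent for 32-bit Arm targets built from two 64-bit pairwise adds.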
#if !AOM_ARCH_AARCH64
static uint16x8_t vpaddq_u16(uint16x8_t a, uint16x8_t b) {
  return vcombine_u16(vpadd_u16(vget_low_u16(a), vget_high_u16(a)),
                      vpadd_u16(vget_low_u16(b), vget_high_u16(b)));
}
#endif

static void cfl_luma_subsampling_420_hbd_neon(const uint16_t *input,
                                              int input_stride,
                                              uint16_t *pred_buf_q3, int width,
                                              int height) {
  const uint16_t *end = pred_buf_q3 + (height >> 1) * CFL_BUF_LINE;
  const int luma_stride = input_stride << 1;
  do {
    if (width == 4) {
      const uint16x4_t top = vld1_u16(input);
      const uint16x4_t bot = vld1_u16(input + input_stride);
      const uint16x4_t sum = vadd_u16(top, bot);
      const uint16x4_t hsum = vpadd_u16(sum, sum);
      vsth_u16(pred_buf_q3, vshl_n_u16(hsum, 1));
    } else if (width < 32) {
      const uint16x8_t top = vld1q_u16(input);
      const uint16x8_t bot = vld1q_u16(input + input_stride);
      const uint16x8_t sum = vaddq_u16(top, bot);
      if (width == 8) {
        const uint16x4_t hsum = vget_low_u16(vpaddq_u16(sum, sum));
        vst1_u16(pred_buf_q3, vshl_n_u16(hsum, 1));
      } else {
        const uint16x8_t top_1 = vld1q_u16(input + 8);
        const uint16x8_t bot_1 = vld1q_u16(input + 8 + input_stride);
        const uint16x8_t sum_1 = vaddq_u16(top_1, bot_1);
        const uint16x8_t hsum = vpaddq_u16(sum, sum_1);
        vst1q_u16(pred_buf_q3, vshlq_n_u16(hsum, 1));
      }
    } else {
      const uint16x8x4_t top = vld4q_u16(input);
      const uint16x8x4_t bot = vld4q_u16(input + input_stride);
      // equivalent to a vpaddq_u16 (because vld4q interleaves)
      const uint16x8_t top_0 = vaddq_u16(top.val[0], top.val[1]);
      // equivalent to a vpaddq_u16 (because vld4q interleaves)
      const uint16x8_t bot_0 = vaddq_u16(bot.val[0], bot.val[1]);
      // equivalent to a vpaddq_u16 (because vld4q interleaves)
      const uint16x8_t top_1 = vaddq_u16(top.val[2], top.val[3]);
      // equivalent to a vpaddq_u16 (because vld4q interleaves)
      const uint16x8_t bot_1 = vaddq_u16(bot.val[2], bot.val[3]);
      uint16x8x2_t sum;
      sum.val[0] = vshlq_n_u16(vaddq_u16(top_0, bot_0), 1);
      sum.val[1] = vshlq_n_u16(vaddq_u16(top_1, bot_1), 1);
      vst2q_u16(pred_buf_q3, sum);
    }
    input += luma_stride;
  } while ((pred_buf_q3 += CFL_BUF_LINE) < end);
}

static void cfl_luma_subsampling_422_hbd_neon(const uint16_t *input,
                                              int input_stride,
                                              uint16_t *pred_buf_q3, int width,
                                              int height) {
  const uint16_t *end = pred_buf_q3 + height * CFL_BUF_LINE;
  do {
    if (width == 4) {
      const uint16x4_t top = vld1_u16(input);
      const uint16x4_t hsum = vpadd_u16(top, top);
      vsth_u16(pred_buf_q3, vshl_n_u16(hsum, 2));
    } else if (width == 8) {
      const uint16x4x2_t top = vld2_u16(input);
      // equivalent to a vpadd_u16 (because vld2 interleaves)
      const uint16x4_t hsum = vadd_u16(top.val[0], top.val[1]);
      vst1_u16(pred_buf_q3, vshl_n_u16(hsum, 2));
    } else if (width == 16) {
      const uint16x8x2_t top = vld2q_u16(input);
      // equivalent to a vpaddq_u16 (because vld2q interleaves)
      const uint16x8_t hsum = vaddq_u16(top.val[0], top.val[1]);
      vst1q_u16(pred_buf_q3, vshlq_n_u16(hsum, 2));
    } else {
      const uint16x8x4_t top = vld4q_u16(input);
      // equivalent to a vpaddq_u16 (because vld4q interleaves)
      const uint16x8_t hsum_0 = vaddq_u16(top.val[0], top.val[1]);
      // equivalent to a vpaddq_u16 (because vld4q interleaves)
      const uint16x8_t hsum_1 = vaddq_u16(top.val[2], top.val[3]);
      uint16x8x2_t result = { { vshlq_n_u16(hsum_0, 2),
                                vshlq_n_u16(hsum_1, 2) } };
      vst2q_u16(pred_buf_q3, result);
    }
    input += input_stride;
  } while ((pred_buf_q3 += CFL_BUF_LINE) < end);
}

static void cfl_luma_subsampling_444_hbd_neon(const uint16_t *input,
                                              int input_stride,
                                              uint16_t *pred_buf_q3, int width,
                                              int height) {
  const uint16_t *end = pred_buf_q3 + height * CFL_BUF_LINE;
  do {
    if (width == 4) {
      const uint16x4_t top = vld1_u16(input);
      vst1_u16(pred_buf_q3, vshl_n_u16(top, 3));
    } else if (width == 8) {
      const uint16x8_t top = vld1q_u16(input);
      vst1q_u16(pred_buf_q3, vshlq_n_u16(top, 3));
    } else if (width == 16) {
      uint16x8x2_t top = vld2q_u16(input);
      top.val[0] = vshlq_n_u16(top.val[0], 3);
      top.val[1] = vshlq_n_u16(top.val[1], 3);
      vst2q_u16(pred_buf_q3, top);
    } else {
      uint16x8x4_t top = vld4q_u16(input);
      top.val[0] = vshlq_n_u16(top.val[0], 3);
      top.val[1] = vshlq_n_u16(top.val[1], 3);
      top.val[2] = vshlq_n_u16(top.val[2], 3);
      top.val[3] = vshlq_n_u16(top.val[3], 3);
      vst4q_u16(pred_buf_q3, top);
    }
    input += input_stride;
  } while ((pred_buf_q3 += CFL_BUF_LINE) < end);
}
#endif  // CONFIG_AV1_HIGHBITDEPTH

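// CFL_GET_SUBSAMPLE_FUNCTION is defined in av1/common/cfl.h; it expands to
// the per-block-size wrapper functions and the
// cfl_get_luma_subsampling_*_neon() dispatchers built on the kernels above.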
CFL_GET_SUBSAMPLE_FUNCTION(neon)

static inline void subtract_average_neon(const uint16_t *src, int16_t *dst,
                                         int width, int height,
                                         int round_offset,
                                         const int num_pel_log2) {
  const uint16_t *const end = src + height * CFL_BUF_LINE;

  // Round offset is not needed, because NEON will handle the rounding.
  (void)round_offset;

  // To optimize the use of the CPU pipeline, we process 4 rows per iteration.
  const int step = 4 * CFL_BUF_LINE;

  // At this stage, the prediction buffer contains scaled reconstructed luma
  // pixels, which are positive integers and only require 15 bits. By using
  // unsigned integers for the sum, we can do one addition operation inside 16
  // bits (8 lanes) before having to convert to 32 bits (4 lanes).
  const uint16_t *sum_buf = src;
  uint32x4_t sum_32x4 = vdupq_n_u32(0);
  do {
    // For all widths, we load, add and combine the data so it fits in 4 lanes.
    if (width == 4) {
      const uint16x4_t a0 =
          vadd_u16(vld1_u16(sum_buf), vld1_u16(sum_buf + CFL_BUF_LINE));
      const uint16x4_t a1 = vadd_u16(vld1_u16(sum_buf + 2 * CFL_BUF_LINE),
                                     vld1_u16(sum_buf + 3 * CFL_BUF_LINE));
      sum_32x4 = vaddq_u32(sum_32x4, vaddl_u16(a0, a1));
    } else if (width == 8) {
      const uint16x8_t a0 = vldaddq_u16(sum_buf, CFL_BUF_LINE);
      const uint16x8_t a1 =
          vldaddq_u16(sum_buf + 2 * CFL_BUF_LINE, CFL_BUF_LINE);
      sum_32x4 = vpadalq_u16(sum_32x4, a0);
      sum_32x4 = vpadalq_u16(sum_32x4, a1);
    } else {
      const uint16x8_t row0 = vldaddq_u16(sum_buf, 8);
      const uint16x8_t row1 = vldaddq_u16(sum_buf + CFL_BUF_LINE, 8);
      const uint16x8_t row2 = vldaddq_u16(sum_buf + 2 * CFL_BUF_LINE, 8);
      const uint16x8_t row3 = vldaddq_u16(sum_buf + 3 * CFL_BUF_LINE, 8);
      sum_32x4 = vpadalq_u16(sum_32x4, row0);
      sum_32x4 = vpadalq_u16(sum_32x4, row1);
      sum_32x4 = vpadalq_u16(sum_32x4, row2);
      sum_32x4 = vpadalq_u16(sum_32x4, row3);

      if (width == 32) {
        const uint16x8_t row0_1 = vldaddq_u16(sum_buf + 16, 8);
        const uint16x8_t row1_1 = vldaddq_u16(sum_buf + CFL_BUF_LINE + 16, 8);
        const uint16x8_t row2_1 =
            vldaddq_u16(sum_buf + 2 * CFL_BUF_LINE + 16, 8);
        const uint16x8_t row3_1 =
            vldaddq_u16(sum_buf + 3 * CFL_BUF_LINE + 16, 8);

        sum_32x4 = vpadalq_u16(sum_32x4, row0_1);
        sum_32x4 = vpadalq_u16(sum_32x4, row1_1);
        sum_32x4 = vpadalq_u16(sum_32x4, row2_1);
        sum_32x4 = vpadalq_u16(sum_32x4, row3_1);
      }
    }
    sum_buf += step;
  } while (sum_buf < end);

  // Permute and add in such a way that each lane contains the block sum.
  // [A+C+B+D, B+D+A+C, C+A+D+B, D+B+C+A]
#if AOM_ARCH_AARCH64
  sum_32x4 = vpaddq_u32(sum_32x4, sum_32x4);
  sum_32x4 = vpaddq_u32(sum_32x4, sum_32x4);
#else
  uint32x4_t flip =
      vcombine_u32(vget_high_u32(sum_32x4), vget_low_u32(sum_32x4));
  sum_32x4 = vaddq_u32(sum_32x4, flip);
  sum_32x4 = vaddq_u32(sum_32x4, vrev64q_u32(sum_32x4));
#endif

  // Computing the average could be done using scalars, but getting off the NEON
  // engine introduces latency, so we use vqrshrn.
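  // vqrshrn_n_u32(sum, n) computes (sum + (1 << (n - 1))) >> n and narrows to
  // 16 bits, i.e. the rounded average over the 1 << num_pel_log2 pixels of the
  // block, which is why round_offset is unused above.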
  int16x4_t avg_16x4;
  // Constant propagation makes for some ugly code.
  switch (num_pel_log2) {
    case 4: avg_16x4 = vreinterpret_s16_u16(vqrshrn_n_u32(sum_32x4, 4)); break;
    case 5: avg_16x4 = vreinterpret_s16_u16(vqrshrn_n_u32(sum_32x4, 5)); break;
    case 6: avg_16x4 = vreinterpret_s16_u16(vqrshrn_n_u32(sum_32x4, 6)); break;
    case 7: avg_16x4 = vreinterpret_s16_u16(vqrshrn_n_u32(sum_32x4, 7)); break;
    case 8: avg_16x4 = vreinterpret_s16_u16(vqrshrn_n_u32(sum_32x4, 8)); break;
    case 9: avg_16x4 = vreinterpret_s16_u16(vqrshrn_n_u32(sum_32x4, 9)); break;
    case 10:
      avg_16x4 = vreinterpret_s16_u16(vqrshrn_n_u32(sum_32x4, 10));
      break;
    default: assert(0);
  }

  if (width == 4) {
    do {
      vst1_s16(dst, vsub_s16(vreinterpret_s16_u16(vld1_u16(src)), avg_16x4));
      src += CFL_BUF_LINE;
      dst += CFL_BUF_LINE;
    } while (src < end);
  } else {
    const int16x8_t avg_16x8 = vcombine_s16(avg_16x4, avg_16x4);
    do {
      vldsubstq_s16(dst, src, 0, avg_16x8);
      vldsubstq_s16(dst, src, CFL_BUF_LINE, avg_16x8);
      vldsubstq_s16(dst, src, 2 * CFL_BUF_LINE, avg_16x8);
      vldsubstq_s16(dst, src, 3 * CFL_BUF_LINE, avg_16x8);

      if (width > 8) {
        vldsubstq_s16(dst, src, 8, avg_16x8);
        vldsubstq_s16(dst, src, 8 + CFL_BUF_LINE, avg_16x8);
        vldsubstq_s16(dst, src, 8 + 2 * CFL_BUF_LINE, avg_16x8);
        vldsubstq_s16(dst, src, 8 + 3 * CFL_BUF_LINE, avg_16x8);
      }
      if (width == 32) {
        vldsubstq_s16(dst, src, 16, avg_16x8);
        vldsubstq_s16(dst, src, 16 + CFL_BUF_LINE, avg_16x8);
        vldsubstq_s16(dst, src, 16 + 2 * CFL_BUF_LINE, avg_16x8);
        vldsubstq_s16(dst, src, 16 + 3 * CFL_BUF_LINE, avg_16x8);
        vldsubstq_s16(dst, src, 24, avg_16x8);
        vldsubstq_s16(dst, src, 24 + CFL_BUF_LINE, avg_16x8);
        vldsubstq_s16(dst, src, 24 + 2 * CFL_BUF_LINE, avg_16x8);
        vldsubstq_s16(dst, src, 24 + 3 * CFL_BUF_LINE, avg_16x8);
      }
      src += step;
      dst += step;
    } while (src < end);
  }
}

CFL_SUB_AVG_FN(neon)

// Negate the 16-bit integers in a when the corresponding signed 16-bit
// integer in b is negative.
// Notes:
//   * Negating INT16_MIN results in INT16_MIN. However, this cannot occur in
//   practice, as scaled_luma is the multiplication of two absolute values.
//   * In the Intel equivalent, elements in a are zeroed out when the
//   corresponding elements in b are zero. Because vsign is used twice in a
//   row, with b in the first call becoming a in the second call, there's no
//   impact from not zeroing out.
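//   * Implementation detail: mask = b >> 15 (arithmetic shift) is all ones
//   when b is negative and zero otherwise, so (a + mask) ^ mask evaluates to
//   -a or a respectively (the usual two's-complement conditional negate).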
static int16x4_t vsign_s16(int16x4_t a, int16x4_t b) {
  const int16x4_t mask = vshr_n_s16(b, 15);
  return veor_s16(vadd_s16(a, mask), mask);
}

// Negate the 16-bit integers in a when the corresponding signed 16-bit
// integer in b is negative.
// Notes:
//   * Negating INT16_MIN results in INT16_MIN. However, this cannot occur in
//   practice, as scaled_luma is the multiplication of two absolute values.
//   * In the Intel equivalent, elements in a are zeroed out when the
//   corresponding elements in b are zero. Because vsignq is used twice in a
//   row, with b in the first call becoming a in the second call, there's no
//   impact from not zeroing out.
static int16x8_t vsignq_s16(int16x8_t a, int16x8_t b) {
  const int16x8_t mask = vshrq_n_s16(b, 15);
  return veorq_s16(vaddq_s16(a, mask), mask);
}

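// The predict_w* helpers apply the CfL scaling in fixed point: abs_alpha_q12
// is abs(alpha_q3) << 9, and vqrdmulh(x, abs_alpha_q12) computes
// (2 * x * abs_alpha_q12 + (1 << 15)) >> 16, which is effectively
// (x * abs(alpha_q3) + 32) >> 6. The sign of the product is then restored
// with vsign_s16/vsignq_s16 using ac_sign = alpha_sign ^ ac_q3 before the DC
// term is added.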
static inline int16x4_t predict_w4(const int16_t *pred_buf_q3,
                                   int16x4_t alpha_sign, int abs_alpha_q12,
                                   int16x4_t dc) {
  const int16x4_t ac_q3 = vld1_s16(pred_buf_q3);
  const int16x4_t ac_sign = veor_s16(alpha_sign, ac_q3);
  int16x4_t scaled_luma = vqrdmulh_n_s16(vabs_s16(ac_q3), abs_alpha_q12);
  return vadd_s16(vsign_s16(scaled_luma, ac_sign), dc);
}

static inline int16x8_t predict_w8(const int16_t *pred_buf_q3,
                                   int16x8_t alpha_sign, int abs_alpha_q12,
                                   int16x8_t dc) {
  const int16x8_t ac_q3 = vld1q_s16(pred_buf_q3);
  const int16x8_t ac_sign = veorq_s16(alpha_sign, ac_q3);
  int16x8_t scaled_luma = vqrdmulhq_n_s16(vabsq_s16(ac_q3), abs_alpha_q12);
  return vaddq_s16(vsignq_s16(scaled_luma, ac_sign), dc);
}

static inline int16x8x2_t predict_w16(const int16_t *pred_buf_q3,
                                      int16x8_t alpha_sign, int abs_alpha_q12,
                                      int16x8_t dc) {
  // vld2q_s16 interleaves, which is not useful for prediction. vld1q_s16_x2
  // does not interleave, but is not currently available in the compiler used
  // by the AOM build system.
  const int16x8x2_t ac_q3 = vld2q_s16(pred_buf_q3);
  const int16x8_t ac_sign_0 = veorq_s16(alpha_sign, ac_q3.val[0]);
  const int16x8_t ac_sign_1 = veorq_s16(alpha_sign, ac_q3.val[1]);
  const int16x8_t scaled_luma_0 =
      vqrdmulhq_n_s16(vabsq_s16(ac_q3.val[0]), abs_alpha_q12);
  const int16x8_t scaled_luma_1 =
      vqrdmulhq_n_s16(vabsq_s16(ac_q3.val[1]), abs_alpha_q12);
  int16x8x2_t result;
  result.val[0] = vaddq_s16(vsignq_s16(scaled_luma_0, ac_sign_0), dc);
  result.val[1] = vaddq_s16(vsignq_s16(scaled_luma_1, ac_sign_1), dc);
  return result;
}

static inline int16x8x4_t predict_w32(const int16_t *pred_buf_q3,
                                      int16x8_t alpha_sign, int abs_alpha_q12,
                                      int16x8_t dc) {
  // vld4q_s16 interleaves, which is not useful for prediction. vld1q_s16_x4
  // does not interleave, but is not currently available in the compiler used
  // by the AOM build system.
  const int16x8x4_t ac_q3 = vld4q_s16(pred_buf_q3);
  const int16x8_t ac_sign_0 = veorq_s16(alpha_sign, ac_q3.val[0]);
  const int16x8_t ac_sign_1 = veorq_s16(alpha_sign, ac_q3.val[1]);
  const int16x8_t ac_sign_2 = veorq_s16(alpha_sign, ac_q3.val[2]);
  const int16x8_t ac_sign_3 = veorq_s16(alpha_sign, ac_q3.val[3]);
  const int16x8_t scaled_luma_0 =
      vqrdmulhq_n_s16(vabsq_s16(ac_q3.val[0]), abs_alpha_q12);
  const int16x8_t scaled_luma_1 =
      vqrdmulhq_n_s16(vabsq_s16(ac_q3.val[1]), abs_alpha_q12);
  const int16x8_t scaled_luma_2 =
      vqrdmulhq_n_s16(vabsq_s16(ac_q3.val[2]), abs_alpha_q12);
  const int16x8_t scaled_luma_3 =
      vqrdmulhq_n_s16(vabsq_s16(ac_q3.val[3]), abs_alpha_q12);
  int16x8x4_t result;
  result.val[0] = vaddq_s16(vsignq_s16(scaled_luma_0, ac_sign_0), dc);
  result.val[1] = vaddq_s16(vsignq_s16(scaled_luma_1, ac_sign_1), dc);
  result.val[2] = vaddq_s16(vsignq_s16(scaled_luma_2, ac_sign_2), dc);
  result.val[3] = vaddq_s16(vsignq_s16(scaled_luma_3, ac_sign_3), dc);
  return result;
}

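// Note: dst is expected to already contain the DC prediction for the block,
// so *dst is broadcast as the DC term and the scaled luma AC contribution is
// added on top before saturating to 8 bits.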
static inline void cfl_predict_lbd_neon(const int16_t *pred_buf_q3,
                                        uint8_t *dst, int dst_stride,
                                        int alpha_q3, int width, int height) {
  const int16_t abs_alpha_q12 = abs(alpha_q3) << 9;
  const int16_t *const end = pred_buf_q3 + height * CFL_BUF_LINE;
  if (width == 4) {
    const int16x4_t alpha_sign = vdup_n_s16(alpha_q3);
    const int16x4_t dc = vdup_n_s16(*dst);
    do {
      const int16x4_t pred =
          predict_w4(pred_buf_q3, alpha_sign, abs_alpha_q12, dc);
      vsth_u8(dst, vqmovun_s16(vcombine_s16(pred, pred)));
      dst += dst_stride;
    } while ((pred_buf_q3 += CFL_BUF_LINE) < end);
  } else {
    const int16x8_t alpha_sign = vdupq_n_s16(alpha_q3);
    const int16x8_t dc = vdupq_n_s16(*dst);
    do {
      if (width == 8) {
        vst1_u8(dst, vqmovun_s16(predict_w8(pred_buf_q3, alpha_sign,
                                            abs_alpha_q12, dc)));
      } else if (width == 16) {
        const int16x8x2_t pred =
            predict_w16(pred_buf_q3, alpha_sign, abs_alpha_q12, dc);
        const uint8x8x2_t predun = { { vqmovun_s16(pred.val[0]),
                                       vqmovun_s16(pred.val[1]) } };
        vst2_u8(dst, predun);
      } else {
        const int16x8x4_t pred =
            predict_w32(pred_buf_q3, alpha_sign, abs_alpha_q12, dc);
        const uint8x8x4_t predun = {
          { vqmovun_s16(pred.val[0]), vqmovun_s16(pred.val[1]),
            vqmovun_s16(pred.val[2]), vqmovun_s16(pred.val[3]) }
        };
        vst4_u8(dst, predun);
      }
      dst += dst_stride;
    } while ((pred_buf_q3 += CFL_BUF_LINE) < end);
  }
}

CFL_PREDICT_FN(neon, lbd)

#if CONFIG_AV1_HIGHBITDEPTH
static inline uint16x4_t clamp_s16(int16x4_t a, int16x4_t max) {
  return vreinterpret_u16_s16(vmax_s16(vmin_s16(a, max), vdup_n_s16(0)));
}

static inline uint16x8_t clampq_s16(int16x8_t a, int16x8_t max) {
  return vreinterpretq_u16_s16(vmaxq_s16(vminq_s16(a, max), vdupq_n_s16(0)));
}

static inline uint16x8x2_t clamp2q_s16(int16x8x2_t a, int16x8_t max) {
  uint16x8x2_t result;
  result.val[0] = vreinterpretq_u16_s16(
      vmaxq_s16(vminq_s16(a.val[0], max), vdupq_n_s16(0)));
  result.val[1] = vreinterpretq_u16_s16(
      vmaxq_s16(vminq_s16(a.val[1], max), vdupq_n_s16(0)));
  return result;
}

static inline uint16x8x4_t clamp4q_s16(int16x8x4_t a, int16x8_t max) {
  uint16x8x4_t result;
  result.val[0] = vreinterpretq_u16_s16(
      vmaxq_s16(vminq_s16(a.val[0], max), vdupq_n_s16(0)));
  result.val[1] = vreinterpretq_u16_s16(
      vmaxq_s16(vminq_s16(a.val[1], max), vdupq_n_s16(0)));
  result.val[2] = vreinterpretq_u16_s16(
      vmaxq_s16(vminq_s16(a.val[2], max), vdupq_n_s16(0)));
  result.val[3] = vreinterpretq_u16_s16(
      vmaxq_s16(vminq_s16(a.val[3], max), vdupq_n_s16(0)));
  return result;
}

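// Same structure as cfl_predict_lbd_neon, but the DC term comes from a 16-bit
// *dst and the output is clamped to [0, (1 << bd) - 1] instead of being
// saturated to 8 bits.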
static inline void cfl_predict_hbd_neon(const int16_t *pred_buf_q3,
                                        uint16_t *dst, int dst_stride,
                                        int alpha_q3, int bd, int width,
                                        int height) {
  const int max = (1 << bd) - 1;
  const int16_t abs_alpha_q12 = abs(alpha_q3) << 9;
  const int16_t *const end = pred_buf_q3 + height * CFL_BUF_LINE;
  if (width == 4) {
    const int16x4_t alpha_sign = vdup_n_s16(alpha_q3);
    const int16x4_t dc = vdup_n_s16(*dst);
    const int16x4_t max_16x4 = vdup_n_s16(max);
    do {
      const int16x4_t scaled_luma =
          predict_w4(pred_buf_q3, alpha_sign, abs_alpha_q12, dc);
      vst1_u16(dst, clamp_s16(scaled_luma, max_16x4));
      dst += dst_stride;
    } while ((pred_buf_q3 += CFL_BUF_LINE) < end);
  } else {
    const int16x8_t alpha_sign = vdupq_n_s16(alpha_q3);
    const int16x8_t dc = vdupq_n_s16(*dst);
    const int16x8_t max_16x8 = vdupq_n_s16(max);
    do {
      if (width == 8) {
        const int16x8_t pred =
            predict_w8(pred_buf_q3, alpha_sign, abs_alpha_q12, dc);
        vst1q_u16(dst, clampq_s16(pred, max_16x8));
      } else if (width == 16) {
        const int16x8x2_t pred =
            predict_w16(pred_buf_q3, alpha_sign, abs_alpha_q12, dc);
        vst2q_u16(dst, clamp2q_s16(pred, max_16x8));
      } else {
        const int16x8x4_t pred =
            predict_w32(pred_buf_q3, alpha_sign, abs_alpha_q12, dc);
        vst4q_u16(dst, clamp4q_s16(pred, max_16x8));
      }
      dst += dst_stride;
    } while ((pred_buf_q3 += CFL_BUF_LINE) < end);
  }
}

CFL_PREDICT_FN(neon, hbd)
#endif  // CONFIG_AV1_HIGHBITDEPTH