/*
 *
 * Copyright (c) 2018, Alliance for Open Media. All rights reserved.
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#include <arm_neon.h>
#include <assert.h>

#include "config/aom_dsp_rtcd.h"

#include "aom/aom_integer.h"
#include "aom_dsp/aom_dsp_common.h"
#include "aom_dsp/arm/blend_neon.h"
#include "aom_dsp/arm/mem_neon.h"

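// Alpha blend two source blocks into dst using a horizontal mask: the mask
// supplies one alpha value per column (in the range [0, 64]) and is applied
// identically to every row. Each output pixel is computed as
// (mask * src0 + (64 - mask) * src1 + 32) >> 6.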
void aom_blend_a64_hmask_neon(uint8_t *dst, uint32_t dst_stride,
                              const uint8_t *src0, uint32_t src0_stride,
                              const uint8_t *src1, uint32_t src1_stride,
                              const uint8_t *mask, int w, int h) {
  assert(IMPLIES(src0 == dst, src0_stride == dst_stride));
  assert(IMPLIES(src1 == dst, src1_stride == dst_stride));

  assert(h >= 2);
  assert(w >= 2);
  assert(IS_POWER_OF_TWO(h));
  assert(IS_POWER_OF_TWO(w));

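  // Dispatch on block width; the two narrowest widths blend two rows per
  // iteration so the 8-byte vectors stay full.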
  if (w > 8) {
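    // Widths of 16 or more: blend each row in 16-pixel chunks, reloading the
    // corresponding 16 mask values for each chunk.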
    do {
      int i = 0;
      do {
        uint8x16_t m0 = vld1q_u8(mask + i);
        uint8x16_t s0 = vld1q_u8(src0 + i);
        uint8x16_t s1 = vld1q_u8(src1 + i);

        uint8x16_t blend = alpha_blend_a64_u8x16(m0, s0, s1);

        vst1q_u8(dst + i, blend);

        i += 16;
      } while (i < w);

      src0 += src0_stride;
      src1 += src1_stride;
      dst += dst_stride;
    } while (--h != 0);
  } else if (w == 8) {
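    // w == 8: the per-column mask fits in a single 8-byte vector, so load it
    // once and reuse it for every row.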
    const uint8x8_t m0 = vld1_u8(mask);
    do {
      uint8x8_t s0 = vld1_u8(src0);
      uint8x8_t s1 = vld1_u8(src1);

      uint8x8_t blend = alpha_blend_a64_u8x8(m0, s0, s1);

      vst1_u8(dst, blend);

      src0 += src0_stride;
      src1 += src1_stride;
      dst += dst_stride;
    } while (--h != 0);
  } else if (w == 4) {
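    // w == 4: duplicate the four mask bytes into both halves of the vector so
    // that two rows can be blended per iteration.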
    const uint8x8_t m0 = load_unaligned_dup_u8_4x2(mask);
    do {
      uint8x8_t s0 = load_unaligned_u8_4x2(src0, src0_stride);
      uint8x8_t s1 = load_unaligned_u8_4x2(src1, src1_stride);

      uint8x8_t blend = alpha_blend_a64_u8x8(m0, s0, s1);

      store_u8x4_strided_x2(dst, dst_stride, blend);

      src0 += 2 * src0_stride;
      src1 += 2 * src1_stride;
      dst += 2 * dst_stride;
      h -= 2;
    } while (h != 0);
  } else if (w == 2 && h >= 16) {
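    // w == 2: broadcast the two mask bytes across the vector and blend two
    // rows per iteration.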
    const uint8x8_t m0 = vreinterpret_u8_u16(vld1_dup_u16((uint16_t *)mask));
    do {
      uint8x8_t s0 = load_unaligned_u8_2x2(src0, src0_stride);
      uint8x8_t s1 = load_unaligned_u8_2x2(src1, src1_stride);

      uint8x8_t blend = alpha_blend_a64_u8x8(m0, s0, s1);

      store_u8x2_strided_x2(dst, dst_stride, blend);

      src0 += 2 * src0_stride;
      src1 += 2 * src1_stride;
      dst += 2 * dst_stride;
      h -= 2;
    } while (h != 0);
  } else {
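    // Remaining cases (w == 2 with h < 16) fall back to the C implementation.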
    aom_blend_a64_hmask_c(dst, dst_stride, src0, src0_stride, src1, src1_stride,
                          mask, w, h);
  }
}