/*
 *
 * Copyright (c) 2018, Alliance for Open Media. All rights reserved.
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#include <arm_neon.h>
#include <assert.h>

#include "aom/aom_integer.h"
#include "aom_dsp/aom_dsp_common.h"
#include "aom_dsp/arm/blend_neon.h"
#include "aom_dsp/arm/mem_neon.h"
#include "aom_dsp/blend.h"
#include "aom_ports/mem.h"
#include "config/aom_dsp_rtcd.h"

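// Blend two source images into dst using a per-row ("vertical") mask. Each
// output row i is computed as
//   dst[i][j] = ROUND_POWER_OF_TWO(mask[i] * src0[i][j] +
//                                  (64 - mask[i]) * src1[i][j], 6)
// with mask values in [0, AOM_BLEND_A64_MAX_ALPHA], i.e. [0, 64].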
void aom_blend_a64_vmask_neon(uint8_t *dst, uint32_t dst_stride,
                              const uint8_t *src0, uint32_t src0_stride,
                              const uint8_t *src1, uint32_t src1_stride,
                              const uint8_t *mask, int w, int h) {
  assert(IMPLIES(src0 == dst, src0_stride == dst_stride));
  assert(IMPLIES(src1 == dst, src1_stride == dst_stride));

  assert(h >= 2);
  assert(w >= 2);
  assert(IS_POWER_OF_TWO(h));
  assert(IS_POWER_OF_TWO(w));

  if (w > 8) {
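    // Wide rows (w == 16, 32, ...): broadcast the row's single mask value to
    // all 16 lanes and blend the row in 16-pixel chunks.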
    do {
      uint8x16_t m0 = vdupq_n_u8(mask[0]);
      int i = 0;
      do {
        uint8x16_t s0 = vld1q_u8(src0 + i);
        uint8x16_t s1 = vld1q_u8(src1 + i);

        uint8x16_t blend = alpha_blend_a64_u8x16(m0, s0, s1);

        vst1q_u8(dst + i, blend);

        i += 16;
      } while (i < w);

      mask += 1;
      src0 += src0_stride;
      src1 += src1_stride;
      dst += dst_stride;
    } while (--h != 0);
  } else if (w == 8) {
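    // w == 8: one 8-byte vector per row, one mask value per row.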
    do {
      uint8x8_t m0 = vdup_n_u8(mask[0]);
      uint8x8_t s0 = vld1_u8(src0);
      uint8x8_t s1 = vld1_u8(src1);

      uint8x8_t blend = alpha_blend_a64_u8x8(m0, s0, s1);

      vst1_u8(dst, blend);

      mask += 1;
      src0 += src0_stride;
      src1 += src1_stride;
      dst += dst_stride;
    } while (--h != 0);
  } else if (w == 4) {
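    // w == 4: blend two rows per iteration. The 8-lane mask
    // { m0, m0, m0, m0, m1, m1, m1, m1 } lines up with the two 4-pixel rows
    // packed into each 8-byte source vector.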
    do {
      const uint16x4_t m0 = vdup_n_u16((uint16_t)mask[0]);
      const uint16x4_t m1 = vdup_n_u16((uint16_t)mask[1]);
      const uint8x8_t m = vmovn_u16(vcombine_u16(m0, m1));
      uint8x8_t s0 = load_unaligned_u8_4x2(src0, src0_stride);
      uint8x8_t s1 = load_unaligned_u8_4x2(src1, src1_stride);

      uint8x8_t blend = alpha_blend_a64_u8x8(m, s0, s1);

      store_u8x4_strided_x2(dst, dst_stride, blend);

      mask += 2;
      src0 += 2 * src0_stride;
      src1 += 2 * src1_stride;
      dst += 2 * dst_stride;
      h -= 2;
    } while (h != 0);
  } else if (w == 2 && h >= 16) {
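    // w == 2: blend two 2-pixel rows per iteration. This path is restricted
    // to h >= 16; for shorter blocks the vector setup overhead is unlikely
    // to beat the C fallback below.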
    do {
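      // Load the two mask bytes as one u16 lane, then zip the vector with
      // itself to repeat each byte: { m0, m0, m1, m1, ... }, matching the
      // 2x2 pixel layout produced by load_unaligned_u8_2x2.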
      uint16x4_t m0 = vdup_n_u16(0);
      m0 = vld1_lane_u16((uint16_t *)mask, m0, 0);
      uint8x8_t m =
          vzip_u8(vreinterpret_u8_u16(m0), vreinterpret_u8_u16(m0)).val[0];
      uint8x8_t s0 = load_unaligned_u8_2x2(src0, src0_stride);
      uint8x8_t s1 = load_unaligned_u8_2x2(src1, src1_stride);

      uint8x8_t blend = alpha_blend_a64_u8x8(m, s0, s1);

      store_u8x2_strided_x2(dst, dst_stride, blend);

      mask += 2;
      src0 += 2 * src0_stride;
      src1 += 2 * src1_stride;
      dst += 2 * dst_stride;
      h -= 2;
    } while (h != 0);
  } else {
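    // Remaining case (w == 2 with h < 16): fall back to the scalar C
    // implementation.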
    aom_blend_a64_vmask_c(dst, dst_stride, src0, src0_stride, src1, src1_stride,
                          mask, w, h);
  }
}