/* xref: /aosp_15_r20/external/libvpx/vp8/encoder/loongarch/vp8_quantize_lsx.c
 * (revision fb1b10ab9aebc7c7068eedab379b749d7e3900be) */
/*
 *  Copyright (c) 2022 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <stdint.h>

#include "./vp8_rtcd.h"
#include "vp8/encoder/block.h"
#include "vpx_util/loongson_intrinsics.h"

/* Scalar per-lane tail of the regular quantizer for lanes 0..7 of the
 * first vector pair (zig-zag positions 0..7).
 *
 * _in0: vector of zero-bin margins (x - zbin - zbin_oq) per lane.
 * _in1: vector of quantized magnitudes per lane.
 * _in2: vector of signed quantized coefficients; the lane is cleared
 *       when the coefficient fails the boosted zero-bin test.
 * _ui:  lane index (0..7), which is also the zig-zag position.
 *
 * If the lane passes the boosted zero-bin test and its quantized value
 * is non-zero, record it as the new end-of-block and reset the boost
 * pointer to the start of the run table; otherwise advance the boost
 * pointer (longer zero run -> larger boost).
 *
 * NOTE: expands in-place and relies on `boost_temp`, `zbin_boost` and
 * `eob` being in scope at the expansion site.
 */
#define BOOST_QUANT1(_in0, _in1, _in2, _ui)               \
  do {                                                    \
    if (boost_temp[0] <= __lsx_vpickve2gr_h(_in0, _ui)) { \
      if (__lsx_vpickve2gr_h(_in1, _ui)) {                \
        eob = _ui;                                        \
        boost_temp = zbin_boost;                          \
      } else {                                            \
        boost_temp++;                                     \
      }                                                   \
    } else {                                              \
      _in2 = __lsx_vinsgr2vr_h(_in2, 0, _ui);             \
      boost_temp++;                                       \
    }                                                     \
  } while (0)

/* Same as BOOST_QUANT1 but for lanes 0..7 of the second vector pair,
 * i.e. zig-zag positions 8..15 (hence `eob = _ui + 8`).
 *
 * NOTE: expands in-place and relies on `boost_temp`, `zbin_boost` and
 * `eob` being in scope at the expansion site.
 */
#define BOOST_QUANT2(_in0, _in1, _in2, _ui)               \
  do {                                                    \
    if (boost_temp[0] <= __lsx_vpickve2gr_h(_in0, _ui)) { \
      if (__lsx_vpickve2gr_h(_in1, _ui)) {                \
        eob = _ui + 8;                                    \
        boost_temp = zbin_boost;                          \
      } else {                                            \
        boost_temp++;                                     \
      }                                                   \
    } else {                                              \
      _in2 = __lsx_vinsgr2vr_h(_in2, 0, _ui);             \
      boost_temp++;                                       \
    }                                                     \
  } while (0)

exact_regular_quantize_b_lsx(int16_t * zbin_boost,int16_t * coeff_ptr,int16_t * zbin,int16_t * round,int16_t * quant,int16_t * quant_shift,int16_t * de_quant,int16_t zbin_oq_in,int16_t * q_coeff,int16_t * dq_coeff)46 static int8_t exact_regular_quantize_b_lsx(
47     int16_t *zbin_boost, int16_t *coeff_ptr, int16_t *zbin, int16_t *round,
48     int16_t *quant, int16_t *quant_shift, int16_t *de_quant, int16_t zbin_oq_in,
49     int16_t *q_coeff, int16_t *dq_coeff) {
50   int32_t eob;
51   int16_t *boost_temp = zbin_boost;
52   __m128i inv_zig_zag = { 0x0C07040206050100, 0x0F0E0A090D0B0803 };
53   __m128i sign_z0, sign_z1, q_coeff0, q_coeff1;
54   __m128i z_bin0, z_bin1, zbin_o_q, x0, x1, sign_x0, sign_x1, de_quant0,
55       de_quant1;
56   __m128i z0, z1, round0, round1, quant0, quant2;
57   __m128i inv_zig_zag0, inv_zig_zag1;
58   __m128i zigzag_mask0 = { 0x0008000400010000, 0x0006000300020005 };
59   __m128i zigzag_mask1 = { 0x000A000D000C0009, 0X000F000E000B0007 };
60   __m128i tmp0, tmp1, tmp2, tmp3;
61   __m128i zero = __lsx_vldi(0);
62 
63   zbin_o_q = __lsx_vreplgr2vr_h(zbin_oq_in);
64   inv_zig_zag0 = __lsx_vilvl_b(zero, inv_zig_zag);
65   inv_zig_zag1 = __lsx_vilvh_b(zero, inv_zig_zag);
66   eob = -1;
67   DUP4_ARG2(__lsx_vld, coeff_ptr, 0, coeff_ptr, 16, round, 0, round, 16, tmp0,
68             tmp1, tmp2, tmp3);
69   DUP4_ARG3(__lsx_vshuf_h, zigzag_mask0, tmp1, tmp0, zigzag_mask1, tmp1, tmp0,
70             zigzag_mask0, tmp3, tmp2, zigzag_mask1, tmp3, tmp2, z0, z1, round0,
71             round1);
72   DUP4_ARG2(__lsx_vld, quant, 0, quant, 16, zbin, 0, zbin, 16, tmp0, tmp1, tmp2,
73             tmp3);
74   DUP4_ARG3(__lsx_vshuf_h, zigzag_mask0, tmp1, tmp0, zigzag_mask1, tmp1, tmp0,
75             zigzag_mask0, tmp3, tmp2, zigzag_mask1, tmp3, tmp2, quant0, quant2,
76             z_bin0, z_bin1);
77   DUP2_ARG2(__lsx_vsrai_h, z0, 15, z1, 15, sign_z0, sign_z1);
78   DUP2_ARG2(__lsx_vadda_h, z0, zero, z1, zero, x0, x1);
79   DUP2_ARG2(__lsx_vsub_h, x0, z_bin0, x1, z_bin1, z_bin0, z_bin1);
80   DUP2_ARG2(__lsx_vsub_h, z_bin0, zbin_o_q, z_bin1, zbin_o_q, z_bin0, z_bin1);
81   DUP2_ARG2(__lsx_vmulwev_w_h, quant0, round0, quant2, round1, tmp0, tmp2);
82   DUP2_ARG2(__lsx_vmulwod_w_h, quant0, round0, quant2, round1, tmp1, tmp3);
83   DUP2_ARG3(__lsx_vmaddwev_w_h, tmp0, quant0, x0, tmp2, quant2, x1, tmp0, tmp2);
84   DUP2_ARG3(__lsx_vmaddwod_w_h, tmp1, quant0, x0, tmp3, quant2, x1, tmp1, tmp3);
85   DUP2_ARG2(__lsx_vpackod_h, tmp1, tmp0, tmp3, tmp2, q_coeff0, q_coeff1);
86 
87   DUP2_ARG2(__lsx_vld, quant_shift, 0, quant_shift, 16, tmp1, tmp3);
88   DUP2_ARG3(__lsx_vshuf_h, zigzag_mask0, tmp3, tmp1, zigzag_mask1, tmp3, tmp1,
89             quant0, quant2);
90   DUP2_ARG2(__lsx_vadd_h, x0, round0, x1, round1, x0, x1);
91   DUP2_ARG2(__lsx_vmulwev_w_h, quant0, q_coeff0, quant2, q_coeff1, tmp0, tmp2);
92   DUP2_ARG2(__lsx_vmulwod_w_h, quant0, q_coeff0, quant2, q_coeff1, tmp1, tmp3);
93   DUP2_ARG3(__lsx_vmaddwev_w_h, tmp0, quant0, x0, tmp2, quant2, x1, tmp0, tmp2);
94   DUP2_ARG3(__lsx_vmaddwod_w_h, tmp1, quant0, x0, tmp3, quant2, x1, tmp1, tmp3);
95   DUP2_ARG2(__lsx_vpackod_h, tmp1, tmp0, tmp3, tmp2, x0, x1);
96   DUP2_ARG2(__lsx_vxor_v, x0, sign_z0, x1, sign_z1, sign_x0, sign_x1);
97   DUP2_ARG2(__lsx_vsub_h, sign_x0, sign_z0, sign_x1, sign_z1, sign_x0, sign_x1);
98 
99   BOOST_QUANT1(z_bin0, x0, sign_x0, 0);
100   BOOST_QUANT1(z_bin0, x0, sign_x0, 1);
101   BOOST_QUANT1(z_bin0, x0, sign_x0, 2);
102   BOOST_QUANT1(z_bin0, x0, sign_x0, 3);
103   BOOST_QUANT1(z_bin0, x0, sign_x0, 4);
104   BOOST_QUANT1(z_bin0, x0, sign_x0, 5);
105   BOOST_QUANT1(z_bin0, x0, sign_x0, 6);
106   BOOST_QUANT1(z_bin0, x0, sign_x0, 7);
107 
108   BOOST_QUANT2(z_bin1, x1, sign_x1, 0);
109   BOOST_QUANT2(z_bin1, x1, sign_x1, 1);
110   BOOST_QUANT2(z_bin1, x1, sign_x1, 2);
111   BOOST_QUANT2(z_bin1, x1, sign_x1, 3);
112   BOOST_QUANT2(z_bin1, x1, sign_x1, 4);
113   BOOST_QUANT2(z_bin1, x1, sign_x1, 5);
114   BOOST_QUANT2(z_bin1, x1, sign_x1, 6);
115   BOOST_QUANT2(z_bin1, x1, sign_x1, 7);
116 
117   DUP2_ARG2(__lsx_vld, de_quant, 0, de_quant, 16, de_quant0, de_quant1);
118   DUP2_ARG3(__lsx_vshuf_h, inv_zig_zag0, sign_x1, sign_x0, inv_zig_zag1,
119             sign_x1, sign_x0, q_coeff0, q_coeff1);
120   DUP2_ARG2(__lsx_vmul_h, de_quant0, q_coeff0, de_quant1, q_coeff1, de_quant0,
121             de_quant1);
122   __lsx_vst(q_coeff0, q_coeff, 0);
123   __lsx_vst(q_coeff1, q_coeff, 16);
124   __lsx_vst(de_quant0, dq_coeff, 0);
125   __lsx_vst(de_quant1, dq_coeff, 16);
126 
127   return (int8_t)(eob + 1);
128 }
129 
/* Public RTCD entry point for the regular (exact) quantizer.
 *
 * Unpacks the per-block quantizer state from the encoder BLOCK and the
 * reconstruction BLOCKD, runs the LSX quantizer over the 16
 * coefficients, and stores the resulting coefficient count (eob + 1)
 * in *d->eob.  All arguments are simple struct-member reads, so they
 * are passed through directly.
 */
void vp8_regular_quantize_b_lsx(BLOCK *b, BLOCKD *d) {
  *d->eob = exact_regular_quantize_b_lsx(
      b->zrun_zbin_boost, b->coeff, b->zbin, b->round, b->quant,
      b->quant_shift, d->dequant, b->zbin_extra, d->qcoeff, d->dqcoeff);
}