/* Copyright (c) 2007-2008 CSIRO
   Copyright (c) 2007-2009 Xiph.Org Foundation
   Written by Jean-Marc Valin */
/*
   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions
   are met:

   - Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.

   - Redistributions in binary form must reproduce the above copyright
   notice, this list of conditions and the following disclaimer in the
   documentation and/or other materials provided with the distribution.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
   OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
   EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
   PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
   PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
   LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
   NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
   SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include "mathops.h"
#include "cwrs.h"
#include "vq.h"
#include "arch.h"
#include "os_support.h"
#include "bands.h"
#include "rate.h"
#include "pitch.h"

#if defined(MIPSr1_ASM)
#include "mips/vq_mipsr1.h"
#endif

#ifndef OVERRIDE_vq_exp_rotation1
static void exp_rotation1(celt_norm *X, int len, int stride, opus_val16 c, opus_val16 s)
{
   int i;
   opus_val16 ms;
   celt_norm *Xptr;
   Xptr = X;
   ms = NEG16(s);
   for (i=0;i<len-stride;i++)
   {
      celt_norm x1, x2;
      x1 = Xptr[0];
      x2 = Xptr[stride];
      Xptr[stride] = EXTRACT16(PSHR32(MAC16_16(MULT16_16(c, x2),  s, x1), 15));
      *Xptr++      = EXTRACT16(PSHR32(MAC16_16(MULT16_16(c, x1), ms, x2), 15));
   }
   Xptr = &X[len-2*stride-1];
   for (i=len-2*stride-1;i>=0;i--)
   {
      celt_norm x1, x2;
      x1 = Xptr[0];
      x2 = Xptr[stride];
      Xptr[stride] = EXTRACT16(PSHR32(MAC16_16(MULT16_16(c, x2),  s, x1), 15));
      *Xptr--      = EXTRACT16(PSHR32(MAC16_16(MULT16_16(c, x1), ms, x2), 15));
   }
}
#endif /* OVERRIDE_vq_exp_rotation1 */
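
/* Descriptive note (added for exposition): in the float build the
   fixed-point macros above reduce to a plain Givens rotation of each
   (x1, x2) pair,
      Xptr[stride] = c*x2 + s*x1;
      Xptr[0]      = c*x1 - s*x2;
   i.e. a rotation by the angle asin(s). The exact float expansions of
   MULT16_16()/MAC16_16()/PSHR32() live in arch.h. */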

void exp_rotation(celt_norm *X, int len, int dir, int stride, int K, int spread)
{
   static const int SPREAD_FACTOR[3]={15,10,5};
   int i;
   opus_val16 c, s;
   opus_val16 gain, theta;
   int stride2=0;
   int factor;

   if (2*K>=len || spread==SPREAD_NONE)
      return;
   factor = SPREAD_FACTOR[spread-1];

   gain = celt_div((opus_val32)MULT16_16(Q15_ONE,len),(opus_val32)(len+factor*K));
   theta = HALF16(MULT16_16_Q15(gain,gain));

   c = celt_cos_norm(EXTEND32(theta));
   s = celt_cos_norm(EXTEND32(SUB16(Q15ONE,theta))); /*  sin(theta) */

   if (len>=8*stride)
   {
      stride2 = 1;
      /* This is just a simple (equivalent) way of computing sqrt(len/stride) with rounding.
         It's basically incrementing stride2 as long as (stride2+0.5)^2 < len/stride. */
      while ((stride2*stride2+stride2)*stride + (stride>>2) < len)
         stride2++;
   }
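   /* Worked example (illustrative): len=64, stride=4 gives stride>>2 == 1,
      so the loop tests 9<64, 25<64, 49<64 and stops at 81>=64, leaving
      stride2 == 4 == round(sqrt(64/4)). */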
   /*NOTE: As a minor optimization, we could be passing around log2(B), not B, for both this and for
      extract_collapse_mask().*/
   len = celt_udiv(len, stride);
   for (i=0;i<stride;i++)
   {
      if (dir < 0)
      {
         if (stride2)
            exp_rotation1(X+i*len, len, stride2, s, c);
         exp_rotation1(X+i*len, len, 1, c, s);
      } else {
         exp_rotation1(X+i*len, len, 1, c, -s);
         if (stride2)
            exp_rotation1(X+i*len, len, stride2, s, -c);
      }
   }
}
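
/* Note (added for exposition): the dir < 0 branch applies the two component
   rotations in the opposite order with the sine sign flipped, so it undoes
   the dir > 0 spreading rotation up to rounding. The resynthesis paths in
   alg_quant() and alg_unquant() below rely on this, calling exp_rotation()
   with dir = -1 after decoding the pulses. */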

/** Takes the pitch vector and the decoded residual vector, computes the gain
    that will give ||p+g*y||=1 and mixes the residual with the pitch. */
static void normalise_residual(int * OPUS_RESTRICT iy, celt_norm * OPUS_RESTRICT X,
      int N, opus_val32 Ryy, opus_val16 gain)
{
   int i;
#ifdef FIXED_POINT
   int k;
#endif
   opus_val32 t;
   opus_val16 g;

#ifdef FIXED_POINT
   k = celt_ilog2(Ryy)>>1;
#endif
   t = VSHR32(Ryy, 2*(k-7));
   g = MULT16_16_P15(celt_rsqrt_norm(t),gain);

   i=0;
   do
      X[i] = EXTRACT16(PSHR32(MULT16_16(g, iy[i]), k+1));
   while (++i < N);
}
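
/* Float-domain sketch of what normalise_residual() computes (the k and
   VSHR32()/PSHR32() terms only matter in the fixed-point build, where they
   keep t in the range celt_rsqrt_norm() expects):
      g = gain / sqrt(Ryy);
      X[i] = g * iy[i];
   so the reconstructed vector has norm `gain` when Ryy = sum(iy[i]^2). */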

static unsigned extract_collapse_mask(int *iy, int N, int B)
{
   unsigned collapse_mask;
   int N0;
   int i;
   if (B<=1)
      return 1;
   /*NOTE: As a minor optimization, we could be passing around log2(B), not B, for both this and for
      exp_rotation().*/
   N0 = celt_udiv(N, B);
   collapse_mask = 0;
   i=0; do {
      int j;
      unsigned tmp=0;
      j=0; do {
         tmp |= iy[i*N0+j];
      } while (++j<N0);
      collapse_mask |= (tmp!=0)<<i;
   } while (++i<B);
   return collapse_mask;
}
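
/* Worked example (illustrative): with N=8 and B=2, each block spans N0=4
   consecutive entries. For iy = {0,1,0,0, 0,0,0,0} only block 0 contains a
   pulse, so the mask is 0b01 = 1; for iy = {0,1,0,0, 0,0,2,0} both blocks
   do, giving 0b11 = 3. Bit i of the mask is set iff block i is non-zero. */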

opus_val16 op_pvq_search_c(celt_norm *X, int *iy, int K, int N, int arch)
{
   VARDECL(celt_norm, y);
   VARDECL(int, signx);
   int i, j;
   int pulsesLeft;
   opus_val32 sum;
   opus_val32 xy;
   opus_val16 yy;
   SAVE_STACK;

   (void)arch;
   ALLOC(y, N, celt_norm);
   ALLOC(signx, N, int);

   /* Get rid of the sign */
   sum = 0;
   j=0; do {
      signx[j] = X[j]<0;
      /* OPT: Make sure the compiler doesn't use a branch on ABS16(). */
      X[j] = ABS16(X[j]);
      iy[j] = 0;
      y[j] = 0;
   } while (++j<N);

   xy = yy = 0;

   pulsesLeft = K;

   /* Do a pre-search by projecting on the pyramid */
   if (K > (N>>1))
   {
      opus_val16 rcp;
      j=0; do {
         sum += X[j];
      }  while (++j<N);

      /* If X is too small, just replace it with a pulse at 0 */
#ifdef FIXED_POINT
      if (sum <= K)
#else
      /* Prevents infinities and NaNs from causing too many pulses
         to be allocated. 64 is an approximation of infinity here. */
      if (!(sum > EPSILON && sum < 64))
#endif
      {
         X[0] = QCONST16(1.f,14);
         j=1; do
            X[j]=0;
         while (++j<N);
         sum = QCONST16(1.f,14);
      }
#ifdef FIXED_POINT
      rcp = EXTRACT16(MULT16_32_Q16(K, celt_rcp(sum)));
#else
      /* Using K+e with e < 1 guarantees we cannot get more than K pulses. */
      rcp = EXTRACT16(MULT16_32_Q16(K+0.8f, celt_rcp(sum)));
#endif
      j=0; do {
#ifdef FIXED_POINT
         /* It's really important to round *towards zero* here */
         iy[j] = MULT16_16_Q15(X[j],rcp);
#else
         iy[j] = (int)floor(rcp*X[j]);
#endif
         y[j] = (celt_norm)iy[j];
         yy = MAC16_16(yy, y[j],y[j]);
         xy = MAC16_16(xy, X[j],y[j]);
         y[j] *= 2;
         pulsesLeft -= iy[j];
      }  while (++j<N);
   }
   celt_sig_assert(pulsesLeft>=0);

   /* This should never happen, but just in case it does (e.g. on silence)
      we fill the first bin with pulses. */
#ifdef FIXED_POINT_DEBUG
   celt_sig_assert(pulsesLeft<=N+3);
#endif
   if (pulsesLeft > N+3)
   {
      opus_val16 tmp = (opus_val16)pulsesLeft;
      yy = MAC16_16(yy, tmp, tmp);
      yy = MAC16_16(yy, tmp, y[0]);
      iy[0] += pulsesLeft;
      pulsesLeft=0;
   }

   for (i=0;i<pulsesLeft;i++)
   {
      opus_val16 Rxy, Ryy;
      int best_id;
      opus_val32 best_num;
      opus_val16 best_den;
#ifdef FIXED_POINT
      int rshift;
#endif
#ifdef FIXED_POINT
      rshift = 1+celt_ilog2(K-pulsesLeft+i+1);
#endif
      best_id = 0;
      /* The squared magnitude term gets added anyway, so we might as well
         add it outside the loop */
      yy = ADD16(yy, 1);

      /* Calculations for position 0 are out of the loop, in part to reduce
         mispredicted branches (since the if condition is usually false)
         in the loop. */
      /* Temporary sums of the new pulse(s) */
      Rxy = EXTRACT16(SHR32(ADD32(xy, EXTEND32(X[0])),rshift));
      /* We're multiplying y[j] by two so we don't have to do it here */
      Ryy = ADD16(yy, y[0]);

      /* Approximate score: we maximise Rxy/sqrt(Ryy) (we're guaranteed that
         Rxy is positive because the sign is pre-computed) */
      Rxy = MULT16_16_Q15(Rxy,Rxy);
      best_den = Ryy;
      best_num = Rxy;
      j=1;
      do {
         /* Temporary sums of the new pulse(s) */
         Rxy = EXTRACT16(SHR32(ADD32(xy, EXTEND32(X[j])),rshift));
         /* We're multiplying y[j] by two so we don't have to do it here */
         Ryy = ADD16(yy, y[j]);

         /* Approximate score: we maximise Rxy/sqrt(Ryy) (we're guaranteed that
            Rxy is positive because the sign is pre-computed) */
         Rxy = MULT16_16_Q15(Rxy,Rxy);
         /* The idea is to check for num/den >= best_num/best_den, but that way
            we can do it without any division */
         /* OPT: It's not clear whether a cmov is faster than a branch here
            since the condition is more often false than true and using
            a cmov introduces data dependencies across iterations. The optimal
            choice may be architecture-dependent. */
         if (opus_unlikely(MULT16_16(best_den, Rxy) > MULT16_16(Ryy, best_num)))
         {
            best_den = Ryy;
            best_num = Rxy;
            best_id = j;
         }
      } while (++j<N);

      /* Updating the sums of the new pulse(s) */
      xy = ADD32(xy, EXTEND32(X[best_id]));
      /* We're multiplying y[j] by two so we don't have to do it here */
      yy = ADD16(yy, y[best_id]);

      /* Only now that we've made the final choice, update y/iy */
      /* Multiplying y[j] by 2 so we don't have to do it everywhere else */
      y[best_id] += 2;
      iy[best_id]++;
   }

   /* Put the original sign back */
   j=0;
   do {
      /*iy[j] = signx[j] ? -iy[j] : iy[j];*/
      /* OPT: This is more likely to be compiled without a branch than the code above
         but has the same performance otherwise. */
      iy[j] = (iy[j]^-signx[j]) + signx[j];
   } while (++j<N);
   RESTORE_STACK;
   return yy;
}
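
/* Float-domain sketch of one step of the greedy search above (illustrative
   only, not part of the build; assumes X[] has already been made
   non-negative and y[]/iy[] zeroed, as op_pvq_search_c() does, with y[]
   holding the un-doubled pulse counts and yy = sum(y[j]^2)). Each pulse
   goes to the position j maximising (xy + X[j])^2 / (yy + 2*y[j] + 1),
   the squared correlation after adding one pulse at j; candidates are
   compared by cross-multiplication so no division is needed. */
#if 0
static void pvq_greedy_step_sketch(const float *X, float *y, int *iy,
                                   float *xy, float *yy, int N)
{
   int j, best = 0;
   float best_num = -1.f, best_den = 1.f;
   for (j = 0; j < N; j++)
   {
      float num = *xy + X[j];              /* correlation with the new pulse */
      float den = *yy + 2*y[j] + 1;        /* energy with the new pulse */
      num *= num;
      if (num*best_den > den*best_num)     /* num/den > best_num/best_den */
      {
         best_num = num;
         best_den = den;
         best = j;
      }
   }
   *xy += X[best];
   *yy += 2*y[best] + 1;                   /* (y+1)^2 = y^2 + 2y + 1 */
   y[best] += 1;
   iy[best]++;
}
#endif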

unsigned alg_quant(celt_norm *X, int N, int K, int spread, int B, ec_enc *enc,
      opus_val16 gain, int resynth, int arch)
{
   VARDECL(int, iy);
   opus_val16 yy;
   unsigned collapse_mask;
   SAVE_STACK;

   celt_assert2(K>0, "alg_quant() needs at least one pulse");
   celt_assert2(N>1, "alg_quant() needs at least two dimensions");

   /* Covers vectorization by up to 4. */
   ALLOC(iy, N+3, int);

   exp_rotation(X, N, 1, B, K, spread);

   yy = op_pvq_search(X, iy, K, N, arch);

   encode_pulses(iy, N, K, enc);

   if (resynth)
   {
      normalise_residual(iy, X, N, yy, gain);
      exp_rotation(X, N, -1, B, K, spread);
   }

   collapse_mask = extract_collapse_mask(iy, N, B);
   RESTORE_STACK;
   return collapse_mask;
}

/** Decode pulse vector and combine the result with the pitch vector to produce
    the final normalised signal in the current band. */
unsigned alg_unquant(celt_norm *X, int N, int K, int spread, int B,
      ec_dec *dec, opus_val16 gain)
{
   opus_val32 Ryy;
   unsigned collapse_mask;
   VARDECL(int, iy);
   SAVE_STACK;

   celt_assert2(K>0, "alg_unquant() needs at least one pulse");
   celt_assert2(N>1, "alg_unquant() needs at least two dimensions");
   ALLOC(iy, N, int);
   Ryy = decode_pulses(iy, N, K, dec);
   normalise_residual(iy, X, N, Ryy, gain);
   exp_rotation(X, N, -1, B, K, spread);
   collapse_mask = extract_collapse_mask(iy, N, B);
   RESTORE_STACK;
   return collapse_mask;
}

#ifndef OVERRIDE_renormalise_vector
void renormalise_vector(celt_norm *X, int N, opus_val16 gain, int arch)
{
   int i;
#ifdef FIXED_POINT
   int k;
#endif
   opus_val32 E;
   opus_val16 g;
   opus_val32 t;
   celt_norm *xptr;
   E = EPSILON + celt_inner_prod(X, X, N, arch);
#ifdef FIXED_POINT
   k = celt_ilog2(E)>>1;
#endif
   t = VSHR32(E, 2*(k-7));
   g = MULT16_16_P15(celt_rsqrt_norm(t),gain);

   xptr = X;
   for (i=0;i<N;i++)
   {
      *xptr = EXTRACT16(PSHR32(MULT16_16(g, *xptr), k+1));
      xptr++;
   }
   /*return celt_sqrt(E);*/
}
#endif /* OVERRIDE_renormalise_vector */
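
/* Float-domain sketch of renormalise_vector() (the k/shift terms are
   fixed-point scaling only):
      g = gain / sqrt(EPSILON + sum(X[i]^2));
      X[i] *= g;
   i.e. X is rescaled to have norm `gain`. */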

int stereo_itheta(const celt_norm *X, const celt_norm *Y, int stereo, int N, int arch)
{
   int i;
   int itheta;
   opus_val16 mid, side;
   opus_val32 Emid, Eside;

   Emid = Eside = EPSILON;
   if (stereo)
   {
      for (i=0;i<N;i++)
      {
         celt_norm m, s;
         m = ADD16(SHR16(X[i],1),SHR16(Y[i],1));
         s = SUB16(SHR16(X[i],1),SHR16(Y[i],1));
         Emid = MAC16_16(Emid, m, m);
         Eside = MAC16_16(Eside, s, s);
      }
   } else {
      Emid += celt_inner_prod(X, X, N, arch);
      Eside += celt_inner_prod(Y, Y, N, arch);
   }
   mid = celt_sqrt(Emid);
   side = celt_sqrt(Eside);
#ifdef FIXED_POINT
   /* 0.63662 = 2/pi */
   itheta = MULT16_16_Q15(QCONST16(0.63662f,15),celt_atan2p(side, mid));
#else
   itheta = (int)floor(.5f+16384*0.63662f*fast_atan2f(side,mid));
#endif

   return itheta;
}
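
/* Worked example (illustrative): itheta maps the mid/side angle to Q14,
   i.e. itheta = round(16384 * (2/pi) * atan2(side, mid)). A silent side
   channel gives itheta = 0; equal mid and side energy gives
   atan2(1,1) = pi/4 and hence itheta ~= 8192; a silent mid channel gives
   itheta = 16384. */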