/* Copyright (c) 2007-2008 CSIRO
   Copyright (c) 2007-2008 Xiph.Org Foundation
   Written by Jean-Marc Valin */
/*
   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions
   are met:

   - Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.

   - Redistributions in binary form must reproduce the above copyright
   notice, this list of conditions and the following disclaimer in the
   documentation and/or other materials provided with the distribution.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
   OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
   EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
   PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
   PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
   LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
   NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
   SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

/* This is a simple MDCT implementation that uses an N/4 complex FFT
   to do most of the work. It should be relatively straightforward to
   plug in pretty much any FFT here.

   This replaces the Vorbis FFT (and uses the exact same API), which
   was a bit too messy and was ending up duplicating code
   (might as well use the same FFT everywhere).

   The algorithm is similar to (and inspired by) Fabrice Bellard's
   MDCT implementation in FFMPEG, but has differences in signs, ordering
   and scaling in many places.
*/
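/* Structural overview (added note): up to the sign, ordering and scaling
   conventions mentioned above, the forward transform below computes a
   standard MDCT. N input samples are windowed and folded down to N/2 real
   values, packed as N/4 complex pairs, pre-rotated, run through the N/4
   complex FFT, and post-rotated into the N/2 output coefficients. The
   backward transform applies the same stages in reverse. The exact twiddle
   phase comes from the trig table built by clt_mdct_init() in mdct.c, so the
   notes added below describe the structure rather than a precise phase
   convention. */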
#ifndef MDCT_MIPSR1_H__
#define MDCT_MIPSR1_H__

#ifndef SKIP_CONFIG_H
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#endif

#include "mdct.h"
#include "kiss_fft.h"
#include "_kiss_fft_guts.h"
#include <math.h>
#include "os_support.h"
#include "mathops.h"
#include "stack_alloc.h"

/* Forward MDCT trashes the input array */
#define OVERRIDE_clt_mdct_forward
void clt_mdct_forward(const mdct_lookup *l, kiss_fft_scalar *in, kiss_fft_scalar * OPUS_RESTRICT out,
                      const opus_val16 *window, int overlap, int shift, int stride, int arch)
{
   int i;
   int N, N2, N4;
   VARDECL(kiss_fft_scalar, f);
   VARDECL(kiss_fft_cpx, f2);
   const kiss_fft_state *st = l->kfft[shift];
   const kiss_twiddle_scalar *trig;
   opus_val16 scale;
#ifdef FIXED_POINT
   /* Allows us to scale with MULT16_32_Q16(), which is faster than
      MULT16_32_Q15() on ARM. */
   int scale_shift = st->scale_shift-1;
#endif

   (void)arch;

   SAVE_STACK;
   scale = st->scale;

   N = l->n;
   trig = l->trig;
   for (i=0;i<shift;i++)
   {
      N >>= 1;
      trig += N;
   }
   N2 = N>>1;
   N4 = N>>2;

   ALLOC(f, N2, kiss_fft_scalar);
   ALLOC(f2, N4, kiss_fft_cpx);

   /* Consider the input to be composed of four blocks: [a, b, c, d] */
   /* Window, shuffle, fold */
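   /* Added note: the three loops below fold the windowed input down to N/2
      real values, written to f as N/4 interleaved (re, im) pairs. The first
      and last loops cover the overlap region, where the window is applied
      against the mirrored half of the signal; the middle loop covers the
      part outside the overlap, where no window is applied and samples are
      copied as-is. */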
   {
      /* Temp pointers to make it really clear to the compiler what we're doing */
      const kiss_fft_scalar * OPUS_RESTRICT xp1 = in+(overlap>>1);
      const kiss_fft_scalar * OPUS_RESTRICT xp2 = in+N2-1+(overlap>>1);
      kiss_fft_scalar * OPUS_RESTRICT yp = f;
      const opus_val16 * OPUS_RESTRICT wp1 = window+(overlap>>1);
      const opus_val16 * OPUS_RESTRICT wp2 = window+(overlap>>1)-1;
      for(i=0;i<((overlap+3)>>2);i++)
      {
         /* Real part arranged as -d-cR, Imag part arranged as -b+aR */
         *yp++ = S_MUL_ADD(*wp2, xp1[N2], *wp1, *xp2);
         *yp++ = S_MUL_SUB(*wp1, *xp1, *wp2, xp2[-N2]);
         xp1+=2;
         xp2-=2;
         wp1+=2;
         wp2-=2;
      }
      wp1 = window;
      wp2 = window+overlap-1;
      for(;i<N4-((overlap+3)>>2);i++)
      {
         /* Real part arranged as a-bR, Imag part arranged as -c-dR */
         *yp++ = *xp2;
         *yp++ = *xp1;
         xp1+=2;
         xp2-=2;
      }
      for(;i<N4;i++)
      {
         /* Real part arranged as a-bR, Imag part arranged as -c-dR */
         *yp++ = S_MUL_SUB(*wp2, *xp2, *wp1, xp1[-N2]);
         *yp++ = S_MUL_ADD(*wp2, *xp1, *wp1, xp2[N2]);
         xp1+=2;
         xp2-=2;
         wp1+=2;
         wp2-=2;
      }
   }
   /* Pre-rotation */
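   /* Added note: each folded (re, im) pair is rotated by a per-bin twiddle
      factor whose two components are t[i] and t[N4+i], scaled by st->scale
      (plus a right shift in fixed point), and stored directly in bit-reversed
      order so that opus_fft_impl() can run in place. */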
   {
      kiss_fft_scalar * OPUS_RESTRICT yp = f;
      const kiss_twiddle_scalar *t = &trig[0];
      for(i=0;i<N4;i++)
      {
         kiss_fft_cpx yc;
         kiss_twiddle_scalar t0, t1;
         kiss_fft_scalar re, im, yr, yi;
         t0 = t[i];
         t1 = t[N4+i];
         re = *yp++;
         im = *yp++;

         yr = S_MUL_SUB(re,t0,im,t1);
         yi = S_MUL_ADD(im,t0,re,t1);

         yc.r = yr;
         yc.i = yi;
         yc.r = PSHR32(MULT16_32_Q16(scale, yc.r), scale_shift);
         yc.i = PSHR32(MULT16_32_Q16(scale, yc.i), scale_shift);
         f2[st->bitrev[i]] = yc;
      }
   }

   /* N/4 complex FFT, does not downscale anymore */
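   /* Added note: the FFT itself applies no scaling here; st->scale was
      already folded into the pre-rotation above. */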
   opus_fft_impl(st, f2);

   /* Post-rotate */
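   /* Added note: the rotated outputs are written from both ends of out[] at
      the given stride: yr lands at even coefficient positions walking forward
      from out[0], yi at odd positions walking backward from
      out[stride*(N2-1)]. */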
   {
      /* Temp pointers to make it really clear to the compiler what we're doing */
      const kiss_fft_cpx * OPUS_RESTRICT fp = f2;
      kiss_fft_scalar * OPUS_RESTRICT yp1 = out;
      kiss_fft_scalar * OPUS_RESTRICT yp2 = out+stride*(N2-1);
      const kiss_twiddle_scalar *t = &trig[0];
      for(i=0;i<N4;i++)
      {
         kiss_fft_scalar yr, yi;
         yr = S_MUL_SUB(fp->i, t[N4+i], fp->r, t[i]);
         yi = S_MUL_ADD(fp->r, t[N4+i], fp->i, t[i]);
         *yp1 = yr;
         *yp2 = yi;
         fp++;
         yp1 += 2*stride;
         yp2 -= 2*stride;
      }
   }
   RESTORE_STACK;
}

#define OVERRIDE_clt_mdct_backward
void clt_mdct_backward(const mdct_lookup *l, kiss_fft_scalar *in, kiss_fft_scalar * OPUS_RESTRICT out,
                       const opus_val16 * OPUS_RESTRICT window, int overlap, int shift, int stride, int arch)
{
   int i;
   int N, N2, N4;
   const kiss_twiddle_scalar *trig;

   (void)arch;

   N = l->n;
   trig = l->trig;
   for (i=0;i<shift;i++)
   {
      N >>= 1;
      trig += N;
   }
   N2 = N>>1;
   N4 = N>>2;

   /* Pre-rotate */
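   /* Added note: the backward transform reuses the forward FFT. Swapping the
      real and imaginary parts on the way in and again on the way out gives an
      unnormalized inverse transform, since swap(FFT(swap(x))) equals the
      inverse FFT of x scaled by the transform length. The pre-rotated values
      are also written straight into bit-reversed order, as the comments in
      the loop below note. */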
   {
      /* Temp pointers to make it really clear to the compiler what we're doing */
      const kiss_fft_scalar * OPUS_RESTRICT xp1 = in;
      const kiss_fft_scalar * OPUS_RESTRICT xp2 = in+stride*(N2-1);
      kiss_fft_scalar * OPUS_RESTRICT yp = out+(overlap>>1);
      const kiss_twiddle_scalar * OPUS_RESTRICT t = &trig[0];
      const opus_int16 * OPUS_RESTRICT bitrev = l->kfft[shift]->bitrev;
      for(i=0;i<N4;i++)
      {
         int rev;
         kiss_fft_scalar yr, yi;
         rev = *bitrev++;
         yr = S_MUL_ADD(*xp2, t[i], *xp1, t[N4+i]);
         yi = S_MUL_SUB(*xp1, t[i], *xp2, t[N4+i]);
         /* We swap real and imag because we use an FFT instead of an IFFT. */
         yp[2*rev+1] = yr;
         yp[2*rev] = yi;
         /* Storing the pre-rotation directly in the bitrev order. */
         xp1+=2*stride;
         xp2-=2*stride;
      }
   }

   opus_fft_impl(l->kfft[shift], (kiss_fft_cpx*)(out+(overlap>>1)));

   /* Post-rotate and de-shuffle from both ends of the buffer at once to make
      it in-place. */
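   /* Added note: each iteration handles one pair from the front (yp0) and one
      from the back (yp1) of the buffer. Both ends are read before either is
      overwritten within an iteration, which is what keeps the in-place
      de-shuffle correct. */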
   {
      kiss_fft_scalar * OPUS_RESTRICT yp0 = out+(overlap>>1);
      kiss_fft_scalar * OPUS_RESTRICT yp1 = out+(overlap>>1)+N2-2;
      const kiss_twiddle_scalar *t = &trig[0];
      /* Loop to (N4+1)>>1 to handle odd N4. When N4 is odd, the
         middle pair will be computed twice. */
      for(i=0;i<(N4+1)>>1;i++)
      {
         kiss_fft_scalar re, im, yr, yi;
         kiss_twiddle_scalar t0, t1;
         /* We swap real and imag because we're using an FFT instead of an IFFT. */
         re = yp0[1];
         im = yp0[0];
         t0 = t[i];
         t1 = t[N4+i];
         /* We'd scale up by 2 here, but instead it's done when mixing the windows */
         yr = S_MUL_ADD(re,t0, im,t1);
         yi = S_MUL_SUB(re,t1, im,t0);
         /* We swap real and imag because we're using an FFT instead of an IFFT. */
         re = yp1[1];
         im = yp1[0];
         yp0[0] = yr;
         yp1[1] = yi;

         t0 = t[(N4-i-1)];
         t1 = t[(N2-i-1)];
         /* We'd scale up by 2 here, but instead it's done when mixing the windows */
         yr = S_MUL_ADD(re,t0,im,t1);
         yi = S_MUL_SUB(re,t1,im,t0);
         yp1[0] = yr;
         yp0[1] = yi;
         yp0 += 2;
         yp1 -= 2;
      }
   }

   /* Mirror on both sides for TDAC */
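   /* Added note: the overlap region at the start of out[] is windowed against
      its mirror image, producing the windowed halves whose overlap-add with
      the neighbouring frames cancels the time-domain aliasing (TDAC). */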
   {
      kiss_fft_scalar * OPUS_RESTRICT xp1 = out+overlap-1;
      kiss_fft_scalar * OPUS_RESTRICT yp1 = out;
      const opus_val16 * OPUS_RESTRICT wp1 = window;
      const opus_val16 * OPUS_RESTRICT wp2 = window+overlap-1;

      for(i = 0; i < overlap/2; i++)
      {
         kiss_fft_scalar x1, x2;
         x1 = *xp1;
         x2 = *yp1;
         *yp1++ = MULT16_32_Q15(*wp2, x2) - MULT16_32_Q15(*wp1, x1);
         *xp1-- = MULT16_32_Q15(*wp1, x2) + MULT16_32_Q15(*wp2, x1);
         wp1++;
         wp2--;
      }
   }
}
#endif /* MDCT_MIPSR1_H__ */