/* Copyright (c) 2007-2008 CSIRO
   Copyright (c) 2007-2009 Xiph.Org Foundation
   Written by Jean-Marc Valin */
/*
   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions
   are met:

   - Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.

   - Redistributions in binary form must reproduce the above copyright
   notice, this list of conditions and the following disclaimer in the
   documentation and/or other materials provided with the distribution.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
   OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
   EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
   PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
   PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
   LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
   NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
   SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#ifndef VQ_MIPSR1_H__
#define VQ_MIPSR1_H__

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include "mathops.h"
#include "arch.h"

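/* MIPS32r1-optimized replacements for exp_rotation1() and
   renormalise_vector() from celt/vq.c, selected through the
   OVERRIDE_* macros below. */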
#define OVERRIDE_vq_exp_rotation1
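/* Apply a sequence of 2-D Givens rotations to X: a forward pass over
   pairs (X[i], X[i+stride]) followed by a backward pass, with the
   rotation angle given by the cosine/sine pair (c, s) in Q15. */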
static void exp_rotation1(celt_norm *X, int len, int stride, opus_val16 c, opus_val16 s)
{
   int i;
   opus_val16 ms;
   celt_norm *Xptr;
   Xptr = X;
   ms = NEG16(s);
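   /* Forward pass: each butterfly maps (x1, x2) to
      (c*x1 - s*x2, c*x2 + s*x1), rounded back to 16 bits. */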
   for (i=0;i<len-stride;i++)
   {
      celt_norm x1, x2;
      x1 = Xptr[0];
      x2 = Xptr[stride];
      Xptr[stride] = EXTRACT16(PSHR32(MAC16_16(MULT16_16(c, x2),  s, x1), 15));
      *Xptr++      = EXTRACT16(PSHR32(MAC16_16(MULT16_16(c, x1), ms, x2), 15));
   }
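   /* Backward pass: run the same butterflies from the end of the
      vector, so the rotation spreads across all coefficients. */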
   Xptr = &X[len-2*stride-1];
   for (i=len-2*stride-1;i>=0;i--)
   {
      celt_norm x1, x2;
      x1 = Xptr[0];
      x2 = Xptr[stride];
      Xptr[stride] = EXTRACT16(PSHR32(MAC16_16(MULT16_16(c, x2),  s, x1), 15));
      *Xptr--      = EXTRACT16(PSHR32(MAC16_16(MULT16_16(c, x1), ms, x2), 15));
   }
}

#define OVERRIDE_renormalise_vector
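/* Normalise X so that its L2 norm equals gain, accumulating the
   energy sum(X[i]^2) in the MIPS DSP accumulator $ac1 instead of a
   C-level multiply-accumulate loop.  Note: this override assumes a
   fixed-point build; k is only declared when FIXED_POINT is defined. */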
void renormalise_vector(celt_norm *X, int N, opus_val16 gain, int arch)
{
   int i;
#ifdef FIXED_POINT
   int k;
#endif
   opus_val32 E = EPSILON;
   opus_val16 g;
   opus_val32 t;
   celt_norm *xptr = X;
   int X0, X1;

   (void)arch;

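   /* Clear accumulator $ac1, then seed its low word with E (EPSILON)
      so the energy can never be exactly zero.  The energy loop below
      is unrolled by two. */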
   asm volatile("mult $ac1, $0, $0");
   asm volatile("MTLO %0, $ac1" : :"r" (E));
   /*if(N %4)
       printf("error");*/
   for (i=0;i<N-2;i+=2)
   {
      X0 = (int)*xptr++;
      asm volatile("MADD $ac1, %0, %1" : : "r" (X0), "r" (X0));

      X1 = (int)*xptr++;
      asm volatile("MADD $ac1, %0, %1" : : "r" (X1), "r" (X1));
   }

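   /* Tail loop: accumulate the remaining one or two samples left over
      by the unrolled loop. */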
   for (;i<N;i++)
   {
      X0 = (int)*xptr++;
      asm volatile("MADD $ac1, %0, %1" : : "r" (X0), "r" (X0));
   }

   asm volatile("MFLO %0, $ac1" : "=r" (E));
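   /* E now holds EPSILON + sum(X[i]^2); shift it into the range
      expected by celt_rsqrt_norm() and fold the reciprocal square
      root into the requested gain. */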
#ifdef FIXED_POINT
   k = celt_ilog2(E)>>1;
#endif
   t = VSHR32(E, 2*(k-7));
   g = MULT16_16_P15(celt_rsqrt_norm(t),gain);

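   /* Scale every coefficient by g, rounding the product back down to
      the celt_norm range. */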
   xptr = X;
   for (i=0;i<N;i++)
   {
      *xptr = EXTRACT16(PSHR32(MULT16_16(g, *xptr), k+1));
      xptr++;
   }
   /*return celt_sqrt(E);*/
}

#endif /* VQ_MIPSR1_H__ */