/***********************************************************************
Copyright (c) 2006-2011, Skype Limited. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
- Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
- Neither the name of Internet Society, IETF or IETF Trust, nor the
names of specific contributors, may be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
***********************************************************************/

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include "SigProc_FIX.h"

/* Faster than schur64(), but much less accurate. */
/* uses SMLAWB(), requiring armv5E and higher.    */
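/* The Schur recursion converts the input autocorrelation sequence c[] into */
/* reflection coefficients rc_Q15[] and returns the corresponding residual  */
/* energy.                                                                   */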
opus_int32 silk_schur(                  /* O    Returns residual energy                     */
    opus_int16          *rc_Q15,        /* O    reflection coefficients [order] Q15         */
    const opus_int32    *c,             /* I    correlations [order+1]                      */
    const opus_int32    order           /* I    prediction order                            */
)
{
    opus_int        k, n, lz;
    opus_int32      C[ SILK_MAX_ORDER_LPC + 1 ][ 2 ];
    opus_int32      Ctmp1, Ctmp2, rc_tmp_Q15;

    celt_assert( order >= 0 && order <= SILK_MAX_ORDER_LPC );

    /* Get number of leading zeros */
    lz = silk_CLZ32( c[ 0 ] );

    /* Copy correlations and adjust level to Q30 */
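    /* The shifts below scale the input so that C[ 0 ] ends up with exactly two   */
    /* leading zero bits, i.e. close to full Q30 range, which leaves headroom     */
    /* for the correlation updates further down.                                  */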
    k = 0;
    if( lz < 2 ) {
        /* lz must be 1, so shift one to the right */
        do {
            C[ k ][ 0 ] = C[ k ][ 1 ] = silk_RSHIFT( c[ k ], 1 );
        } while( ++k <= order );
    } else if( lz > 2 ) {
        /* Shift to the left */
        lz -= 2;
        do {
            C[ k ][ 0 ] = C[ k ][ 1 ] = silk_LSHIFT( c[ k ], lz );
        } while( ++k <= order );
    } else {
        /* No need to shift */
        do {
            C[ k ][ 0 ] = C[ k ][ 1 ] = c[ k ];
        } while( ++k <= order );
    }

    for( k = 0; k < order; k++ ) {
        /* Check that we won't be getting an unstable rc, otherwise stop here. */
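        /* |C[ k + 1 ][ 0 ]| >= C[ 0 ][ 1 ] would give |rc| >= 1; clamp this      */
        /* coefficient to +/- 0.99 and leave the remaining coefficients at zero.  */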
        if( silk_abs_int32( C[ k + 1 ][ 0 ] ) >= C[ 0 ][ 1 ] ) {
            if( C[ k + 1 ][ 0 ] > 0 ) {
                rc_Q15[ k ] = -SILK_FIX_CONST( .99f, 15 );
            } else {
                rc_Q15[ k ] = SILK_FIX_CONST( .99f, 15 );
            }
            k++;
            break;
        }

        /* Get reflection coefficient */
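        /* rc = -C[ k + 1 ] / C[ 0 ]; dividing the Q30-scaled numerator by the    */
        /* denominator shifted down by 15 bits yields the ratio in Q15.           */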
        rc_tmp_Q15 = -silk_DIV32_16( C[ k + 1 ][ 0 ], silk_max_32( silk_RSHIFT( C[ 0 ][ 1 ], 15 ), 1 ) );

        /* Clip (shouldn't happen for properly conditioned inputs) */
        rc_tmp_Q15 = silk_SAT16( rc_tmp_Q15 );

        /* Store */
        rc_Q15[ k ] = (opus_int16)rc_tmp_Q15;

        /* Update correlations */
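        /* Schur recursion step: C[ n + k + 1 ] += rc * C[ n ] and C[ n ] += rc * C[ n + k + 1 ],  */
        /* using the old values on the right-hand sides. silk_SMLAWB( a, b, c ) adds               */
        /* ( b * (opus_int16)c ) >> 16, so the operands are shifted left by one bit to obtain      */
        /* the >> 15 scaling required for the Q15 reflection coefficient.                          */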
        for( n = 0; n < order - k; n++ ) {
            Ctmp1 = C[ n + k + 1 ][ 0 ];
            Ctmp2 = C[ n ][ 1 ];
            C[ n + k + 1 ][ 0 ] = silk_SMLAWB( Ctmp1, silk_LSHIFT( Ctmp2, 1 ), rc_tmp_Q15 );
            C[ n ][ 1 ]         = silk_SMLAWB( Ctmp2, silk_LSHIFT( Ctmp1, 1 ), rc_tmp_Q15 );
        }
    }

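    /* If the recursion stopped early due to the stability check, zero the rest */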
    for( ; k < order; k++ ) {
        rc_Q15[ k ] = 0;
    }

    /* return residual energy */
    return silk_max_32( 1, C[ 0 ][ 1 ] );
}
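
#if 0
/* Illustrative usage sketch, not part of the library build. The correlation     */
/* values, the order, and the helper name schur_example below are hypothetical,  */
/* chosen only to show how silk_schur() is called and what it returns.           */
static opus_int32 schur_example( void )
{
    /* Made-up autocorrelation of a short, positively correlated signal */
    const opus_int32 c[ 3 ] = { 1 << 20, 1 << 19, 1 << 18 };
    opus_int16       rc_Q15[ 2 ];
    opus_int32       res_energy;

    /* rc_Q15 receives the two reflection coefficients in Q15; the return value  */
    /* is the residual energy at the internal (normalized) scale, at least 1.    */
    res_energy = silk_schur( rc_Q15, c, 2 );

    return res_energy;
}
#endif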