/* Copyright (c) 2008-2011 Octasic Inc.
                 2012-2017 Jean-Marc Valin */
/*
   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions
   are met:

   - Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.

   - Redistributions in binary form must reproduce the above copyright
   notice, this list of conditions and the following disclaimer in the
   documentation and/or other materials provided with the distribution.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR
   CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
   EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
   PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
   PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
   LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
   NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
   SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <math.h>
#include "opus_types.h"
#include "common.h"
#include "arch.h"
#include "tansig_table.h"
#include "rnn.h"
#include "rnn_data.h"
#include <stdio.h>

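/* Fast tanh() approximation. The table in tansig_table.h appears to hold
   tanh() sampled every 1/25 (0.04): the code picks the nearest sample point
   x0 = 0.04*i and refines it with a truncated Taylor expansion around x0,
   using tanh'(x0) = 1 - tanh(x0)^2, i.e.
      tanh(x) ~= y0 + d*(1 - y0*y0)*(1 - y0*d),  where d = x - x0.
   Inputs outside roughly [-8, 8] saturate to +/-1. */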
static OPUS_INLINE float tansig_approx(float x)
{
    int i;
    float y, dy;
    float sign=1;
    /* Tests are reversed to catch NaNs */
    if (!(x<8))
        return 1;
    if (!(x>-8))
        return -1;
#ifndef FIXED_POINT
    /* Another check in case of -ffast-math */
    if (celt_isnan(x))
       return 0;
#endif
    if (x<0)
    {
       x=-x;
       sign=-1;
    }
    i = (int)floor(.5f+25*x);
    x -= .04f*i;
    y = tansig_table[i];
    dy = 1-y*y;
    y = y + x*dy*(1 - y*x);
    return sign*y;
}

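/* Logistic sigmoid via the identity sigmoid(x) = 0.5 + 0.5*tanh(x/2),
   reusing the tanh approximation above. */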
static OPUS_INLINE float sigmoid_approx(float x)
{
   return .5 + .5*tansig_approx(.5*x);
}

static OPUS_INLINE float relu(float x)
{
   return x < 0 ? 0 : x;
}

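/* Fully-connected layer: output = activation(WEIGHTS_SCALE*(W*input + b)).
   Weights are stored with the neuron index varying fastest, i.e.
   input_weights[j*nb_neurons + i] is the weight from input j to neuron i.
   WEIGHTS_SCALE (defined in the included headers) presumably undoes the
   quantization of the stored weights. */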
void compute_dense(const DenseLayer *layer, float *output, const float *input)
{
   int i, j;
   int N, M;
   int stride;
   M = layer->nb_inputs;
   N = layer->nb_neurons;
   stride = N;
   for (i=0;i<N;i++)
   {
      /* Weighted sum of the inputs plus bias for neuron i. */
      float sum = layer->bias[i];
      for (j=0;j<M;j++)
         sum += layer->input_weights[j*stride + i]*input[j];
      output[i] = WEIGHTS_SCALE*sum;
   }
   if (layer->activation == ACTIVATION_SIGMOID) {
      for (i=0;i<N;i++)
         output[i] = sigmoid_approx(output[i]);
   } else if (layer->activation == ACTIVATION_TANH) {
      for (i=0;i<N;i++)
         output[i] = tansig_approx(output[i]);
   } else if (layer->activation == ACTIVATION_RELU) {
      for (i=0;i<N;i++)
         output[i] = relu(output[i]);
   } else {
     /* Unknown activation: crash deliberately. */
     *(int*)0=0;
   }
}

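/* Gated Recurrent Unit. With update gate z, reset gate r and candidate
   state h~, this computes, per neuron:
      z  = sigmoid(Wz*x + Uz*h + bz)
      r  = sigmoid(Wr*x + Ur*h + br)
      h~ = act(Wh*x + Uh*(r .* h) + bh)
      h' = z .* h + (1 - z) .* h~
   The three gates' weights are interleaved in memory: the stride is 3*N and
   the update/reset/candidate blocks start at column offsets 0, N and 2*N,
   with the bias vector packed the same way. As in compute_dense(), every
   pre-activation sum is rescaled by WEIGHTS_SCALE. */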
void compute_gru(const GRULayer *gru, float *state, const float *input)
{
   int i, j;
   int N, M;
   int stride;
   float z[MAX_NEURONS];
   float r[MAX_NEURONS];
   float h[MAX_NEURONS];
   M = gru->nb_inputs;
   N = gru->nb_neurons;
   stride = 3*N;
   for (i=0;i<N;i++)
   {
      /* Compute update gate. */
      float sum = gru->bias[i];
      for (j=0;j<M;j++)
         sum += gru->input_weights[j*stride + i]*input[j];
      for (j=0;j<N;j++)
         sum += gru->recurrent_weights[j*stride + i]*state[j];
      z[i] = sigmoid_approx(WEIGHTS_SCALE*sum);
   }
   for (i=0;i<N;i++)
   {
      /* Compute reset gate. */
      float sum = gru->bias[N + i];
      for (j=0;j<M;j++)
         sum += gru->input_weights[N + j*stride + i]*input[j];
      for (j=0;j<N;j++)
         sum += gru->recurrent_weights[N + j*stride + i]*state[j];
      r[i] = sigmoid_approx(WEIGHTS_SCALE*sum);
   }
   for (i=0;i<N;i++)
   {
      /* Compute candidate (output) state. */
      float sum = gru->bias[2*N + i];
      for (j=0;j<M;j++)
         sum += gru->input_weights[2*N + j*stride + i]*input[j];
      for (j=0;j<N;j++)
         sum += gru->recurrent_weights[2*N + j*stride + i]*state[j]*r[j];
      if (gru->activation == ACTIVATION_SIGMOID) sum = sigmoid_approx(WEIGHTS_SCALE*sum);
      else if (gru->activation == ACTIVATION_TANH) sum = tansig_approx(WEIGHTS_SCALE*sum);
      else if (gru->activation == ACTIVATION_RELU) sum = relu(WEIGHTS_SCALE*sum);
      else *(int*)0=0; /* Unknown activation: crash deliberately. */
      h[i] = z[i]*state[i] + (1-z[i])*sum;
   }
   for (i=0;i<N;i++)
      state[i] = h[i];
}

#define INPUT_SIZE 42

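/* Full inference graph for one frame of INPUT_SIZE features:
     input -> input_dense -> vad_gru -> vad_output                   (VAD estimate)
     [input_dense out | vad_gru state | input] -> noise_gru
     [vad_gru state | noise_gru state | input] -> denoise_gru -> denoise_output  (per-band gains)
   The GRU states live in RNNState and carry over between frames. */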
void compute_rnn(RNNState *rnn, float *gains, float *vad, const float *input) {
  int i;
  float dense_out[MAX_NEURONS];
  float noise_input[MAX_NEURONS*3];
  float denoise_input[MAX_NEURONS*3];
  compute_dense(rnn->model->input_dense, dense_out, input);
  compute_gru(rnn->model->vad_gru, rnn->vad_gru_state, dense_out);
  compute_dense(rnn->model->vad_output, vad, rnn->vad_gru_state);
  for (i=0;i<rnn->model->input_dense_size;i++) noise_input[i] = dense_out[i];
  for (i=0;i<rnn->model->vad_gru_size;i++) noise_input[i+rnn->model->input_dense_size] = rnn->vad_gru_state[i];
  for (i=0;i<INPUT_SIZE;i++) noise_input[i+rnn->model->input_dense_size+rnn->model->vad_gru_size] = input[i];
  compute_gru(rnn->model->noise_gru, rnn->noise_gru_state, noise_input);

  for (i=0;i<rnn->model->vad_gru_size;i++) denoise_input[i] = rnn->vad_gru_state[i];
  for (i=0;i<rnn->model->noise_gru_size;i++) denoise_input[i+rnn->model->vad_gru_size] = rnn->noise_gru_state[i];
  for (i=0;i<INPUT_SIZE;i++) denoise_input[i+rnn->model->vad_gru_size+rnn->model->noise_gru_size] = input[i];
  compute_gru(rnn->model->denoise_gru, rnn->denoise_gru_state, denoise_input);
  compute_dense(rnn->model->denoise_output, gains, rnn->denoise_gru_state);
}
179