1*a58d3d2aSXin Li /* Copyright (c) 2023 Amazon */
2*a58d3d2aSXin Li /*
3*a58d3d2aSXin Li Redistribution and use in source and binary forms, with or without
4*a58d3d2aSXin Li modification, are permitted provided that the following conditions
5*a58d3d2aSXin Li are met:
6*a58d3d2aSXin Li
7*a58d3d2aSXin Li - Redistributions of source code must retain the above copyright
8*a58d3d2aSXin Li notice, this list of conditions and the following disclaimer.
9*a58d3d2aSXin Li
10*a58d3d2aSXin Li - Redistributions in binary form must reproduce the above copyright
11*a58d3d2aSXin Li notice, this list of conditions and the following disclaimer in the
12*a58d3d2aSXin Li documentation and/or other materials provided with the distribution.
13*a58d3d2aSXin Li
14*a58d3d2aSXin Li THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
15*a58d3d2aSXin Li ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
16*a58d3d2aSXin Li LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
17*a58d3d2aSXin Li A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR
18*a58d3d2aSXin Li CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
19*a58d3d2aSXin Li EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
20*a58d3d2aSXin Li PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
21*a58d3d2aSXin Li PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
22*a58d3d2aSXin Li LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
23*a58d3d2aSXin Li NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
24*a58d3d2aSXin Li SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25*a58d3d2aSXin Li */
26*a58d3d2aSXin Li
27*a58d3d2aSXin Li /* This packet loss simulator can be used independently of the Opus codebase.
28*a58d3d2aSXin Li To do that, you need to compile the following files:
29*a58d3d2aSXin Li dnn/lossgen.c
30*a58d3d2aSXin Li dnn/lossgen_data.c
31*a58d3d2aSXin Li
32*a58d3d2aSXin Li with the following files needed as #include
33*a58d3d2aSXin Li dnn/lossgen_data.h
34*a58d3d2aSXin Li dnn/lossgen.h
35*a58d3d2aSXin Li dnn/nnet_arch.h
36*a58d3d2aSXin Li dnn/nnet.h
37*a58d3d2aSXin Li dnn/parse_lpcnet_weights.c (included despite being a C file)
38*a58d3d2aSXin Li dnn/vec_avx.h
39*a58d3d2aSXin Li dnn/vec.h
40*a58d3d2aSXin Li celt/os_support.h
41*a58d3d2aSXin Li celt/arch.h
42*a58d3d2aSXin Li celt/x86/x86_arch_macros.h
43*a58d3d2aSXin Li include/opus_defines.h
44*a58d3d2aSXin Li include/opus_types.h
45*a58d3d2aSXin Li
46*a58d3d2aSXin Li Additionally, the code in dnn/lossgen_demo.c can be used to generate losses from
47*a58d3d2aSXin Li the command line.
48*a58d3d2aSXin Li */
49*a58d3d2aSXin Li
50*a58d3d2aSXin Li #ifdef HAVE_CONFIG_H
51*a58d3d2aSXin Li #include "config.h"
52*a58d3d2aSXin Li #endif
53*a58d3d2aSXin Li
54*a58d3d2aSXin Li #include "arch.h"
55*a58d3d2aSXin Li
56*a58d3d2aSXin Li #include <math.h>
57*a58d3d2aSXin Li #include "lossgen.h"
58*a58d3d2aSXin Li #include "os_support.h"
59*a58d3d2aSXin Li #include "nnet.h"
60*a58d3d2aSXin Li #include "assert.h"
61*a58d3d2aSXin Li
62*a58d3d2aSXin Li /* Disable RTCD for this. */
63*a58d3d2aSXin Li #define RTCD_ARCH c
64*a58d3d2aSXin Li
65*a58d3d2aSXin Li /* Override assert to avoid undefined/redefined symbols. */
66*a58d3d2aSXin Li #undef celt_assert
67*a58d3d2aSXin Li #define celt_assert assert
68*a58d3d2aSXin Li
69*a58d3d2aSXin Li /* Directly include the C files we need since the symbols won't be exposed if we link in a shared object. */
70*a58d3d2aSXin Li #include "parse_lpcnet_weights.c"
71*a58d3d2aSXin Li #include "nnet_arch.h"
72*a58d3d2aSXin Li
73*a58d3d2aSXin Li #undef compute_linear
74*a58d3d2aSXin Li #undef compute_activation
75*a58d3d2aSXin Li
76*a58d3d2aSXin Li /* Force the C version since the SIMD versions may be hidden. */
77*a58d3d2aSXin Li #define compute_linear(linear, out, in, arch) ((void)(arch),compute_linear_c(linear, out, in))
78*a58d3d2aSXin Li #define compute_activation(output, input, N, activation, arch) ((void)(arch),compute_activation_c(output, input, N, activation))
79*a58d3d2aSXin Li
80*a58d3d2aSXin Li #define MAX_RNN_NEURONS_ALL IMAX(LOSSGEN_GRU1_STATE_SIZE, LOSSGEN_GRU2_STATE_SIZE)
81*a58d3d2aSXin Li
82*a58d3d2aSXin Li /* These two functions are copied from nnet.c to make sure we don't have linking issues. */
/* One time-step of a standard GRU (update/reset/candidate gates), copied from
   nnet.c so this file can be built without linking against it (see comment above).
   input_weights:     feed-forward weights producing the 3*N gate pre-activations
   recurrent_weights: recurrent weights applied to the previous state
   state:             in/out — N-element hidden state, updated in place
   in:                input vector for this step (must not alias state)
   arch:              ignored here; the C kernels are forced via the macros above */
void compute_generic_gru_lossgen(const LinearLayer *input_weights, const LinearLayer *recurrent_weights, float *state, const float *in, int arch)
{
   int i;
   int N;
   /* zrh holds the concatenated update (z), reset (r) and candidate (h) gates. */
   float zrh[3*MAX_RNN_NEURONS_ALL];
   float recur[3*MAX_RNN_NEURONS_ALL];
   float *z;
   float *r;
   float *h;
   celt_assert(3*recurrent_weights->nb_inputs == recurrent_weights->nb_outputs);
   celt_assert(input_weights->nb_outputs == recurrent_weights->nb_outputs);
   N = recurrent_weights->nb_inputs;
   z = zrh;
   r = &zrh[N];
   h = &zrh[2*N];
   celt_assert(recurrent_weights->nb_outputs <= 3*MAX_RNN_NEURONS_ALL);
   celt_assert(in != state);
   compute_linear(input_weights, zrh, in, arch);
   compute_linear(recurrent_weights, recur, state, arch);
   /* z and r gates: sum of input and recurrent contributions, then sigmoid. */
   for (i=0;i<2*N;i++)
      zrh[i] += recur[i];
   compute_activation(zrh, zrh, 2*N, ACTIVATION_SIGMOID, arch);
   /* Candidate state: recurrent contribution is gated by r before tanh. */
   for (i=0;i<N;i++)
      h[i] += recur[2*N+i]*r[i];
   compute_activation(h, h, N, ACTIVATION_TANH, arch);
   /* Interpolate old state and candidate with the update gate z. */
   for (i=0;i<N;i++)
      h[i] = z[i]*state[i] + (1-z[i])*h[i];
   for (i=0;i<N;i++)
      state[i] = h[i];
}
113*a58d3d2aSXin Li
114*a58d3d2aSXin Li
compute_generic_dense_lossgen(const LinearLayer * layer,float * output,const float * input,int activation,int arch)115*a58d3d2aSXin Li void compute_generic_dense_lossgen(const LinearLayer *layer, float *output, const float *input, int activation, int arch)
116*a58d3d2aSXin Li {
117*a58d3d2aSXin Li compute_linear(layer, output, input, arch);
118*a58d3d2aSXin Li compute_activation(output, output, layer->nb_outputs, activation, arch);
119*a58d3d2aSXin Li }
120*a58d3d2aSXin Li
121*a58d3d2aSXin Li
/* Run one step of the loss-generator network and draw one loss decision.
   The network sees the previous decision and the target loss percentage,
   and outputs a loss probability that is then sampled with rand().
   Returns 1 if the simulated packet is lost, 0 otherwise. */
static int sample_loss_impl(
    LossGenState *st,
    float percent_loss)
{
   LossGen *model = &st->model;
   float features[2];
   float dense_out[LOSSGEN_DENSE_IN_OUT_SIZE];
   float prob;
   int is_lost;
   features[0] = st->last_loss;
   features[1] = percent_loss;
   /* Input dense layer, two stacked GRUs, then a sigmoid output probability. */
   compute_generic_dense_lossgen(&model->lossgen_dense_in, dense_out, features, ACTIVATION_TANH, 0);
   compute_generic_gru_lossgen(&model->lossgen_gru1_input, &model->lossgen_gru1_recurrent, st->gru1_state, dense_out, 0);
   compute_generic_gru_lossgen(&model->lossgen_gru2_input, &model->lossgen_gru2_recurrent, st->gru2_state, st->gru1_state, 0);
   compute_generic_dense_lossgen(&model->lossgen_dense_out, &prob, st->gru2_state, ACTIVATION_SIGMOID, 0);
   /* Bernoulli sample: lose the packet with probability prob.
      NOTE(review): rand() is never seeded in this file, so sequences are
      reproducible by default — presumably intentional for a simulator. */
   is_lost = (float)rand()/RAND_MAX < prob;
   st->last_loss = is_lost;
   return is_lost;
}
141*a58d3d2aSXin Li
/* Public entry point: draw one loss decision for the given loss percentage.
   Returns 1 (lost) or 0 (received). */
int sample_loss(
    LossGenState *st,
    float percent_loss)
{
   /* The GRU states start at zero, so the first outputs aren't representative.
      Burn in the model by discarding 100 samples the first time it is used. */
   if (!st->used) {
      int burn = 100;
      while (burn-- > 0) {
         sample_loss_impl(st, percent_loss);
      }
      st->used = 1;
   }
   return sample_loss_impl(st, percent_loss);
}
155*a58d3d2aSXin Li
/* Initialize a loss-generator state: zero everything and, unless weights are
   loaded from a file at runtime (USE_WEIGHTS_FILE), install the built-in
   weight arrays. Aborts via celt_assert if the built-in weights fail to load. */
void lossgen_init(LossGenState *st)
{
   int status = 0;
   OPUS_CLEAR(st, 1);
#ifndef USE_WEIGHTS_FILE
   status = init_lossgen(&st->model, lossgen_arrays);
#endif
   celt_assert(status == 0);
   (void)status;
}
168*a58d3d2aSXin Li
/* Load loss-generator weights from an in-memory blob (USE_WEIGHTS_FILE builds).
   data/len describe the serialized weight file; the parsed array list is
   temporary and freed before returning.
   Returns 0 on success, -1 on failure.
   NOTE(review): the parse_weights() return value is not checked here —
   presumably init_lossgen() rejects an incomplete list; verify. */
int lossgen_load_model(LossGenState *st, const void *data, int len) {
   WeightArray *arrays;
   int status;
   parse_weights(&arrays, data, len);
   status = init_lossgen(&st->model, arrays);
   opus_free(arrays);
   return status == 0 ? 0 : -1;
}
178*a58d3d2aSXin Li
#if 0
/* Stand-alone demo: prints <length> loss decisions (0/1) for a target loss
   percentage. Disabled by default; see dnn/lossgen_demo.c for the maintained
   command-line tool. */
#include <stdio.h>
#include <stdlib.h>  /* atof()/atoi(); without this they'd be implicitly
                        declared, which is UB for atof (returns double) and
                        a hard error in C99+. */
int main(int argc, char **argv) {
   int i, N;
   float p;
   LossGenState st;
   if (argc!=3) {
      fprintf(stderr, "usage: lossgen <percentage> <length>\n");
      return 1;
   }
   lossgen_init(&st);
   p = atof(argv[1]);
   N = atoi(argv[2]);
   for (i=0;i<N;i++) {
      printf("%d\n", sample_loss(&st, p));
   }
   return 0;
}
#endif
197