/* Copyright (c) 2023 Amazon */
/*
   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions
   are met:

   - Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.

   - Redistributions in binary form must reproduce the above copyright
   notice, this list of conditions and the following disclaimer in the
   documentation and/or other materials provided with the distribution.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR
   CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
   EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
   PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
   PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
   LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
   NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
   SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

/* This packet loss simulator can be used independently of the Opus codebase.
   To do that, compile the following files:
     dnn/lossgen.c
     dnn/lossgen_data.c

   along with the following files, which are needed as #include dependencies:
     dnn/lossgen_data.h
     dnn/lossgen.h
     dnn/nnet_arch.h
     dnn/nnet.h
     dnn/parse_lpcnet_weights.c (included despite being a C file)
     dnn/vec_avx.h
     dnn/vec.h
     celt/os_support.h
     celt/arch.h
     celt/x86/x86_arch_macros.h
     include/opus_defines.h
     include/opus_types.h

   Additionally, the code in dnn/lossgen_demo.c can be used to generate losses
   from the command line; see the example build command below.
*/
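/* A minimal sketch of a standalone build, assuming a Unix-like toolchain and
   that the command is run from the top of the Opus source tree; the compiler,
   flags, and output name are illustrative only and may need adjusting:

     cc -O2 -Idnn -Icelt -Icelt/x86 -Iinclude \
        dnn/lossgen.c dnn/lossgen_data.c dnn/lossgen_demo.c \
        -lm -o lossgen_demo
*/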

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include "arch.h"

#include <math.h>
#include "lossgen.h"
#include "os_support.h"
#include "nnet.h"
#include "assert.h"

/* Disable RTCD for this. */
#define RTCD_ARCH c

/* Override assert to avoid undefined/redefined symbols. */
#undef celt_assert
#define celt_assert assert

/* Directly include the C files we need since the symbols won't be exposed if we link in a shared object. */
#include "parse_lpcnet_weights.c"
#include "nnet_arch.h"

#undef compute_linear
#undef compute_activation

/* Force the C version since the SIMD versions may be hidden. */
#define compute_linear(linear, out, in, arch) ((void)(arch),compute_linear_c(linear, out, in))
#define compute_activation(output, input, N, activation, arch) ((void)(arch),compute_activation_c(output, input, N, activation))

#define MAX_RNN_NEURONS_ALL IMAX(LOSSGEN_GRU1_STATE_SIZE, LOSSGEN_GRU2_STATE_SIZE)

/* These two functions are copied from nnet.c to make sure we don't have linking issues. */
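/* The GRU below implements the usual gated recurrent unit update, with the
   reset gate applied after the recurrent matrix product (biases are folded
   into the linear layers):
     z      = sigmoid(W_z*x + U_z*h_prev)
     r      = sigmoid(W_r*x + U_r*h_prev)
     h_cand = tanh(W_h*x + r .* (U_h*h_prev))
     h_new  = z .* h_prev + (1-z) .* h_cand
   where x is the input, h_prev the previous state, and .* denotes
   element-wise multiplication. */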
void compute_generic_gru_lossgen(const LinearLayer *input_weights, const LinearLayer *recurrent_weights, float *state, const float *in, int arch)
{
   int i;
   int N;
   float zrh[3*MAX_RNN_NEURONS_ALL];
   float recur[3*MAX_RNN_NEURONS_ALL];
   float *z;
   float *r;
   float *h;
   celt_assert(3*recurrent_weights->nb_inputs == recurrent_weights->nb_outputs);
   celt_assert(input_weights->nb_outputs == recurrent_weights->nb_outputs);
   N = recurrent_weights->nb_inputs;
   z = zrh;
   r = &zrh[N];
   h = &zrh[2*N];
   celt_assert(recurrent_weights->nb_outputs <= 3*MAX_RNN_NEURONS_ALL);
   celt_assert(in != state);
   compute_linear(input_weights, zrh, in, arch);
   compute_linear(recurrent_weights, recur, state, arch);
   for (i=0;i<2*N;i++)
      zrh[i] += recur[i];
   compute_activation(zrh, zrh, 2*N, ACTIVATION_SIGMOID, arch);
   for (i=0;i<N;i++)
      h[i] += recur[2*N+i]*r[i];
   compute_activation(h, h, N, ACTIVATION_TANH, arch);
   for (i=0;i<N;i++)
      h[i] = z[i]*state[i] + (1-z[i])*h[i];
   for (i=0;i<N;i++)
      state[i] = h[i];
}


void compute_generic_dense_lossgen(const LinearLayer *layer, float *output, const float *input, int activation, int arch)
{
   compute_linear(layer, output, input, arch);
   compute_activation(output, output, layer->nb_outputs, activation, arch);
}


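/* One step of the loss model: the previous loss decision and the requested
   loss rate go through a tanh dense layer, two GRU layers, and a sigmoid
   output layer that gives the probability of losing the current packet,
   which is then sampled using rand(). */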
static int sample_loss_impl(
    LossGenState *st,
    float percent_loss)
{
   float input[2];
   float tmp[LOSSGEN_DENSE_IN_OUT_SIZE];
   float out;
   int loss;
   LossGen *model = &st->model;
   input[0] = st->last_loss;
   input[1] = percent_loss;
   compute_generic_dense_lossgen(&model->lossgen_dense_in, tmp, input, ACTIVATION_TANH, 0);
   compute_generic_gru_lossgen(&model->lossgen_gru1_input, &model->lossgen_gru1_recurrent, st->gru1_state, tmp, 0);
   compute_generic_gru_lossgen(&model->lossgen_gru2_input, &model->lossgen_gru2_recurrent, st->gru2_state, st->gru1_state, 0);
   compute_generic_dense_lossgen(&model->lossgen_dense_out, &out, st->gru2_state, ACTIVATION_SIGMOID, 0);
   loss = (float)rand()/RAND_MAX < out;
   st->last_loss = loss;
   return loss;
}

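/* Public entry point: returns 1 if the next packet should be considered lost
   and 0 otherwise, for the requested loss rate. */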
int sample_loss(
    LossGenState *st,
    float percent_loss)
{
   /* Because the GRU states are initialized to zeros, the first packets aren't
      quite random, so we run a short burn-in period and discard those samples. */
   if (!st->used) {
      int i;
      for (i=0;i<100;i++) sample_loss_impl(st, percent_loss);
      st->used = 1;
   }
   return sample_loss_impl(st, percent_loss);
}

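/* Initialize the generator state and, unless the weights are expected to come
   from an external file (USE_WEIGHTS_FILE), load the built-in model from
   lossgen_arrays. */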
void lossgen_init(LossGenState *st)
{
   int ret;
   OPUS_CLEAR(st, 1);
#ifndef USE_WEIGHTS_FILE
   ret = init_lossgen(&st->model, lossgen_arrays);
#else
   ret = 0;
#endif
   celt_assert(ret == 0);
   (void)ret;
}

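/* Load the model from a binary weight blob held in memory, in the format
   understood by parse_weights() (see parse_lpcnet_weights.c). Returns 0 on
   success, -1 on failure. Depending on how the parsed weights reference the
   blob, the caller may need to keep the buffer alive while the state is in
   use. */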
int lossgen_load_model(LossGenState *st, const void *data, int len) {
   WeightArray *list;
   int ret;
   parse_weights(&list, data, len);
   ret = init_lossgen(&st->model, list);
   opus_free(list);
   if (ret == 0) return 0;
   else return -1;
}

#if 0
#include <stdio.h>
#include <stdlib.h>  /* for atof() and atoi() */
int main(int argc, char **argv) {
   int i, N;
   float p;
   LossGenState st;
   if (argc!=3) {
      fprintf(stderr, "usage: lossgen <percentage> <length>\n");
      return 1;
   }
   lossgen_init(&st);
   p = atof(argv[1]);
   N = atoi(argv[2]);
   for (i=0;i<N;i++) {
      printf("%d\n", sample_loss(&st, p));
   }
   return 0;
}
#endif
