1 /* The audioop module uses the code base in g777.c file of the Sox project.
2 * Source: https://web.archive.org/web/19970716121258/http://www.spies.com/Sox/Archive/soxgamma.tar.gz
3 * Programming the AdLib/Sound Blaster
4 * FM Music Chips
5 * Version 2.0 (24 Feb 1992)
6 *
7 * Copyright (c) 1991, 1992 by Jeffrey S. Lee
8 *
9 * [email protected]
10 *
11 *
12 *
13 * Warranty and Copyright Policy
14 *
15 * This document is provided on an "as-is" basis, and its author makes
16 * no warranty or representation, express or implied, with respect to
17 * its quality performance or fitness for a particular purpose. In no
18 * event will the author of this document be liable for direct, indirect,
19 * special, incidental, or consequential damages arising out of the use
20 * or inability to use the information contained within. Use of this
21 * document is at your own risk.
22 *
23 * This file may be used and copied freely so long as the applicable
24 * copyright notices are retained, and no modifications are made to the
25 * text of the document. No money shall be charged for its distribution
26 * beyond reasonable shipping, handling and duplication costs, nor shall
27 * proprietary changes be made to this document so that it cannot be
28 * distributed freely. This document may not be included in published
29 * material or commercial packages without the written consent of its
30 * author. */
31
32 /* audioopmodule - Module to detect peak values in arrays */
33
34 #define PY_SSIZE_T_CLEAN
35
36 #include "Python.h"
37
38 static const int maxvals[] = {0, 0x7F, 0x7FFF, 0x7FFFFF, 0x7FFFFFFF};
39 /* -1 trick is needed on Windows to support -0x80000000 without a warning */
40 static const int minvals[] = {0, -0x80, -0x8000, -0x800000, -0x7FFFFFFF-1};
41 static const unsigned int masks[] = {0, 0xFF, 0xFFFF, 0xFFFFFF, 0xFFFFFFFF};
42
43 static int
fbound(double val, double minval, double maxval)
45 {
46 if (val > maxval) {
47 val = maxval;
48 }
49 else if (val < minval + 1.0) {
50 val = minval;
51 }
52
53 /* Round towards minus infinity (-inf) */
54 val = floor(val);
55
56 /* Cast double to integer: round towards zero */
57 return (int)val;
58 }
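/* Illustrative note (added for clarity, not part of the original code): with
   the 1-byte limits minval = -128.0 and maxval = 127.0, fbound(126.6, ...)
   floors to 126, fbound(300.0, ...) clamps to 127, and fbound(-128.5, ...)
   clamps to -128.  The "minval + 1.0" test means any value below
   (minval + 1) ends up exactly on minval after the floor. */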
59
60
61 #define BIAS 0x84 /* define the add-in bias for 16 bit samples */
62 #define CLIP 32635
63 #define SIGN_BIT (0x80) /* Sign bit for an A-law byte. */
64 #define QUANT_MASK (0xf) /* Quantization field mask. */
65 #define SEG_SHIFT (4) /* Left shift for segment number. */
66 #define SEG_MASK (0x70) /* Segment field mask. */
67
68 static const int16_t seg_aend[8] = {
69 0x1F, 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, 0xFFF
70 };
71 static const int16_t seg_uend[8] = {
72 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF
73 };
74
75 static int16_t
search(int16_t val, const int16_t *table, int size)
77 {
78 int i;
79
80 for (i = 0; i < size; i++) {
81 if (val <= *table++)
82 return (i);
83 }
84 return (size);
85 }
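/* Note (added for clarity): search() returns the index of the first table
   entry that val does not exceed, or size if val is larger than every
   entry; with seg_uend/seg_aend this index is the u-law/A-law segment
   number used by the encoders below. */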
86 #define st_ulaw2linear16(uc) (_st_ulaw2linear16[uc])
87 #define st_alaw2linear16(uc) (_st_alaw2linear16[uc])
88
89 static const int16_t _st_ulaw2linear16[256] = {
90 -32124, -31100, -30076, -29052, -28028, -27004, -25980,
91 -24956, -23932, -22908, -21884, -20860, -19836, -18812,
92 -17788, -16764, -15996, -15484, -14972, -14460, -13948,
93 -13436, -12924, -12412, -11900, -11388, -10876, -10364,
94 -9852, -9340, -8828, -8316, -7932, -7676, -7420,
95 -7164, -6908, -6652, -6396, -6140, -5884, -5628,
96 -5372, -5116, -4860, -4604, -4348, -4092, -3900,
97 -3772, -3644, -3516, -3388, -3260, -3132, -3004,
98 -2876, -2748, -2620, -2492, -2364, -2236, -2108,
99 -1980, -1884, -1820, -1756, -1692, -1628, -1564,
100 -1500, -1436, -1372, -1308, -1244, -1180, -1116,
101 -1052, -988, -924, -876, -844, -812, -780,
102 -748, -716, -684, -652, -620, -588, -556,
103 -524, -492, -460, -428, -396, -372, -356,
104 -340, -324, -308, -292, -276, -260, -244,
105 -228, -212, -196, -180, -164, -148, -132,
106 -120, -112, -104, -96, -88, -80, -72,
107 -64, -56, -48, -40, -32, -24, -16,
108 -8, 0, 32124, 31100, 30076, 29052, 28028,
109 27004, 25980, 24956, 23932, 22908, 21884, 20860,
110 19836, 18812, 17788, 16764, 15996, 15484, 14972,
111 14460, 13948, 13436, 12924, 12412, 11900, 11388,
112 10876, 10364, 9852, 9340, 8828, 8316, 7932,
113 7676, 7420, 7164, 6908, 6652, 6396, 6140,
114 5884, 5628, 5372, 5116, 4860, 4604, 4348,
115 4092, 3900, 3772, 3644, 3516, 3388, 3260,
116 3132, 3004, 2876, 2748, 2620, 2492, 2364,
117 2236, 2108, 1980, 1884, 1820, 1756, 1692,
118 1628, 1564, 1500, 1436, 1372, 1308, 1244,
119 1180, 1116, 1052, 988, 924, 876, 844,
120 812, 780, 748, 716, 684, 652, 620,
121 588, 556, 524, 492, 460, 428, 396,
122 372, 356, 340, 324, 308, 292, 276,
123 260, 244, 228, 212, 196, 180, 164,
124 148, 132, 120, 112, 104, 96, 88,
125 80, 72, 64, 56, 48, 40, 32,
126 24, 16, 8, 0
127 };
128
129 /*
130 * linear2ulaw() accepts a 14-bit signed integer and encodes it as u-law data
131 * stored in an unsigned char. This function should only be called with
132 * the data shifted such that it only contains information in the lower
133 * 14-bits.
134 *
135 * In order to simplify the encoding process, the original linear magnitude
136 * is biased by adding 33 which shifts the encoding range from (0 - 8158) to
137 * (33 - 8191). The result can be seen in the following encoding table:
138 *
139 * Biased Linear Input Code Compressed Code
140 * ------------------------ ---------------
141 * 00000001wxyza 000wxyz
142 * 0000001wxyzab 001wxyz
143 * 000001wxyzabc 010wxyz
144 * 00001wxyzabcd 011wxyz
145 * 0001wxyzabcde 100wxyz
146 * 001wxyzabcdef 101wxyz
147 * 01wxyzabcdefg 110wxyz
148 * 1wxyzabcdefgh 111wxyz
149 *
150 * Each biased linear code has a leading 1 which identifies the segment
151 * number. The value of the segment number is equal to 7 minus the number
152 * of leading 0's. The quantization interval is directly available as the
 * four bits wxyz. The trailing bits (a - h) are ignored.
154 *
155 * Ordinarily the complement of the resulting code word is used for
156 * transmission, and so the code word is complemented before it is returned.
157 *
158 * For further information see John C. Bellamy's Digital Telephony, 1982,
159 * John Wiley & Sons, pps 98-111 and 472-476.
160 */
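/*
 * Illustrative walk-through (added for clarity, not part of the original
 * sox sources): encoding the 14-bit value 240 adds the bias (BIAS >> 2 == 33)
 * giving 273, which falls in segment 3 of seg_uend (0xFF < 273 <= 0x1FF).
 * The quantization bits are (273 >> 4) & 0xF == 1, so the code word is
 * (3 << 4) | 1 == 0x31, and the complemented byte is 0x31 ^ 0xFF == 0xCE.
 * _st_ulaw2linear16[0xCE] decodes back to 988, i.e. 247 at 14-bit
 * precision, the midpoint of that quantization interval.
 */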
161 static unsigned char
st_14linear2ulaw(int16_t pcm_val) /* 2's complement (14-bit range) */
163 {
164 int16_t mask;
165 int16_t seg;
166 unsigned char uval;
167
168 /* u-law inverts all bits */
169 /* Get the sign and the magnitude of the value. */
170 if (pcm_val < 0) {
171 pcm_val = -pcm_val;
172 mask = 0x7F;
173 } else {
174 mask = 0xFF;
175 }
176 if ( pcm_val > CLIP ) pcm_val = CLIP; /* clip the magnitude */
177 pcm_val += (BIAS >> 2);
178
179 /* Convert the scaled magnitude to segment number. */
180 seg = search(pcm_val, seg_uend, 8);
181
182 /*
183 * Combine the sign, segment, quantization bits;
184 * and complement the code word.
185 */
186 if (seg >= 8) /* out of range, return maximum value. */
187 return (unsigned char) (0x7F ^ mask);
188 else {
189 uval = (unsigned char) (seg << 4) | ((pcm_val >> (seg + 1)) & 0xF);
190 return (uval ^ mask);
191 }
192
193 }
194
195 static const int16_t _st_alaw2linear16[256] = {
196 -5504, -5248, -6016, -5760, -4480, -4224, -4992,
197 -4736, -7552, -7296, -8064, -7808, -6528, -6272,
198 -7040, -6784, -2752, -2624, -3008, -2880, -2240,
199 -2112, -2496, -2368, -3776, -3648, -4032, -3904,
200 -3264, -3136, -3520, -3392, -22016, -20992, -24064,
201 -23040, -17920, -16896, -19968, -18944, -30208, -29184,
202 -32256, -31232, -26112, -25088, -28160, -27136, -11008,
203 -10496, -12032, -11520, -8960, -8448, -9984, -9472,
204 -15104, -14592, -16128, -15616, -13056, -12544, -14080,
205 -13568, -344, -328, -376, -360, -280, -264,
206 -312, -296, -472, -456, -504, -488, -408,
207 -392, -440, -424, -88, -72, -120, -104,
208 -24, -8, -56, -40, -216, -200, -248,
209 -232, -152, -136, -184, -168, -1376, -1312,
210 -1504, -1440, -1120, -1056, -1248, -1184, -1888,
211 -1824, -2016, -1952, -1632, -1568, -1760, -1696,
212 -688, -656, -752, -720, -560, -528, -624,
213 -592, -944, -912, -1008, -976, -816, -784,
214 -880, -848, 5504, 5248, 6016, 5760, 4480,
215 4224, 4992, 4736, 7552, 7296, 8064, 7808,
216 6528, 6272, 7040, 6784, 2752, 2624, 3008,
217 2880, 2240, 2112, 2496, 2368, 3776, 3648,
218 4032, 3904, 3264, 3136, 3520, 3392, 22016,
219 20992, 24064, 23040, 17920, 16896, 19968, 18944,
220 30208, 29184, 32256, 31232, 26112, 25088, 28160,
221 27136, 11008, 10496, 12032, 11520, 8960, 8448,
222 9984, 9472, 15104, 14592, 16128, 15616, 13056,
223 12544, 14080, 13568, 344, 328, 376, 360,
224 280, 264, 312, 296, 472, 456, 504,
225 488, 408, 392, 440, 424, 88, 72,
226 120, 104, 24, 8, 56, 40, 216,
227 200, 248, 232, 152, 136, 184, 168,
228 1376, 1312, 1504, 1440, 1120, 1056, 1248,
229 1184, 1888, 1824, 2016, 1952, 1632, 1568,
230 1760, 1696, 688, 656, 752, 720, 560,
231 528, 624, 592, 944, 912, 1008, 976,
232 816, 784, 880, 848
233 };
234
235 /*
236 * linear2alaw() accepts a 13-bit signed integer and encodes it as A-law data
237 * stored in an unsigned char. This function should only be called with
238 * the data shifted such that it only contains information in the lower
239 * 13-bits.
240 *
241 * Linear Input Code Compressed Code
242 * ------------------------ ---------------
243 * 0000000wxyza 000wxyz
244 * 0000001wxyza 001wxyz
245 * 000001wxyzab 010wxyz
246 * 00001wxyzabc 011wxyz
247 * 0001wxyzabcd 100wxyz
248 * 001wxyzabcde 101wxyz
249 * 01wxyzabcdef 110wxyz
250 * 1wxyzabcdefg 111wxyz
251 *
252 * For further information see John C. Bellamy's Digital Telephony, 1982,
253 * John Wiley & Sons, pps 98-111 and 472-476.
254 */
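/*
 * Note (added for clarity): unlike u-law, A-law does not complement the
 * whole code word.  The xor with 0x55 (or 0xD5 when the sign bit is set)
 * below implements the alternate-bit inversion required by the G.711
 * A-law format, and no bias is added before the segment search.
 */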
255 static unsigned char
st_linear2alaw(int16_t pcm_val) /* 2's complement (13-bit range) */
257 {
258 int16_t mask;
259 int16_t seg;
260 unsigned char aval;
261
262 /* A-law using even bit inversion */
263 if (pcm_val >= 0) {
264 mask = 0xD5; /* sign (7th) bit = 1 */
265 } else {
266 mask = 0x55; /* sign bit = 0 */
267 pcm_val = -pcm_val - 1;
268 }
269
270 /* Convert the scaled magnitude to segment number. */
271 seg = search(pcm_val, seg_aend, 8);
272
273 /* Combine the sign, segment, and quantization bits. */
274
275 if (seg >= 8) /* out of range, return maximum value. */
276 return (unsigned char) (0x7F ^ mask);
277 else {
278 aval = (unsigned char) seg << SEG_SHIFT;
279 if (seg < 2)
280 aval |= (pcm_val >> 1) & QUANT_MASK;
281 else
282 aval |= (pcm_val >> seg) & QUANT_MASK;
283 return (aval ^ mask);
284 }
285 }
286 /* End of code taken from sox */
287
288 /* Intel ADPCM step variation table */
289 static const int indexTable[16] = {
290 -1, -1, -1, -1, 2, 4, 6, 8,
291 -1, -1, -1, -1, 2, 4, 6, 8,
292 };
293
294 static const int stepsizeTable[89] = {
295 7, 8, 9, 10, 11, 12, 13, 14, 16, 17,
296 19, 21, 23, 25, 28, 31, 34, 37, 41, 45,
297 50, 55, 60, 66, 73, 80, 88, 97, 107, 118,
298 130, 143, 157, 173, 190, 209, 230, 253, 279, 307,
299 337, 371, 408, 449, 494, 544, 598, 658, 724, 796,
300 876, 963, 1060, 1166, 1282, 1411, 1552, 1707, 1878, 2066,
301 2272, 2499, 2749, 3024, 3327, 3660, 4026, 4428, 4871, 5358,
302 5894, 6484, 7132, 7845, 8630, 9493, 10442, 11487, 12635, 13899,
303 15289, 16818, 18500, 20350, 22385, 24623, 27086, 29794, 32767
304 };
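/* Note (added for clarity): stepsizeTable grows by roughly 10% per entry,
   spanning 7..32767, and indexTable moves the table index up by 2..8 for
   the large 4-bit codes and down by 1 for the small ones; this is the
   usual IMA/DVI ADPCM step adaptation scheme. */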
305
306 #define GETINTX(T, cp, i) (*(T *)((unsigned char *)(cp) + (i)))
307 #define SETINTX(T, cp, i, val) do { \
308 *(T *)((unsigned char *)(cp) + (i)) = (T)(val); \
309 } while (0)
310
311
312 #define GETINT8(cp, i) GETINTX(signed char, (cp), (i))
313 #define GETINT16(cp, i) GETINTX(int16_t, (cp), (i))
314 #define GETINT32(cp, i) GETINTX(int32_t, (cp), (i))
315
316 #ifdef WORDS_BIGENDIAN
317 #define GETINT24(cp, i) ( \
318 ((unsigned char *)(cp) + (i))[2] + \
319 (((unsigned char *)(cp) + (i))[1] << 8) + \
320 (((signed char *)(cp) + (i))[0] << 16) )
321 #else
322 #define GETINT24(cp, i) ( \
323 ((unsigned char *)(cp) + (i))[0] + \
324 (((unsigned char *)(cp) + (i))[1] << 8) + \
325 (((signed char *)(cp) + (i))[2] << 16) )
326 #endif
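/* Example (illustrative): on a little-endian build the bytes
   {0x01, 0x00, 0xFF} decode via GETINT24 as
   0x01 + (0x00 << 8) + ((signed char)0xFF << 16) == 1 - 65536 == -65535,
   i.e. the third byte supplies the sign. */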
327
328
329 #define SETINT8(cp, i, val) SETINTX(signed char, (cp), (i), (val))
330 #define SETINT16(cp, i, val) SETINTX(int16_t, (cp), (i), (val))
331 #define SETINT32(cp, i, val) SETINTX(int32_t, (cp), (i), (val))
332
333 #ifdef WORDS_BIGENDIAN
334 #define SETINT24(cp, i, val) do { \
335 ((unsigned char *)(cp) + (i))[2] = (int)(val); \
336 ((unsigned char *)(cp) + (i))[1] = (int)(val) >> 8; \
337 ((signed char *)(cp) + (i))[0] = (int)(val) >> 16; \
338 } while (0)
339 #else
340 #define SETINT24(cp, i, val) do { \
341 ((unsigned char *)(cp) + (i))[0] = (int)(val); \
342 ((unsigned char *)(cp) + (i))[1] = (int)(val) >> 8; \
343 ((signed char *)(cp) + (i))[2] = (int)(val) >> 16; \
344 } while (0)
345 #endif
346
347
348 #define GETRAWSAMPLE(size, cp, i) ( \
349 (size == 1) ? (int)GETINT8((cp), (i)) : \
350 (size == 2) ? (int)GETINT16((cp), (i)) : \
351 (size == 3) ? (int)GETINT24((cp), (i)) : \
352 (int)GETINT32((cp), (i)))
353
354 #define SETRAWSAMPLE(size, cp, i, val) do { \
355 if (size == 1) \
356 SETINT8((cp), (i), (val)); \
357 else if (size == 2) \
358 SETINT16((cp), (i), (val)); \
359 else if (size == 3) \
360 SETINT24((cp), (i), (val)); \
361 else \
362 SETINT32((cp), (i), (val)); \
363 } while(0)
364
365
366 #define GETSAMPLE32(size, cp, i) ( \
367 (size == 1) ? (int)GETINT8((cp), (i)) << 24 : \
368 (size == 2) ? (int)GETINT16((cp), (i)) << 16 : \
369 (size == 3) ? (int)GETINT24((cp), (i)) << 8 : \
370 (int)GETINT32((cp), (i)))
371
372 #define SETSAMPLE32(size, cp, i, val) do { \
373 if (size == 1) \
374 SETINT8((cp), (i), (val) >> 24); \
375 else if (size == 2) \
376 SETINT16((cp), (i), (val) >> 16); \
377 else if (size == 3) \
378 SETINT24((cp), (i), (val) >> 8); \
379 else \
380 SETINT32((cp), (i), (val)); \
381 } while(0)
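/* Example (illustrative): GETSAMPLE32 places every sample in the top bits
   of a 32-bit int, so a 2-byte sample 0x1234 is read as 0x12340000, and
   SETSAMPLE32 shifts it back down before storing.  This lets the
   conversion routines below work on one 32-bit representation regardless
   of width. */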
382
383 static PyModuleDef audioopmodule;
384
385 typedef struct {
386 PyObject *AudioopError;
387 } audioop_state;
388
389 static inline audioop_state *
get_audioop_state(PyObject *module)
391 {
392 void *state = PyModule_GetState(module);
393 assert(state != NULL);
394 return (audioop_state *)state;
395 }
396
397 static int
audioop_check_size(PyObject *module, int size)
399 {
400 if (size < 1 || size > 4) {
401 PyErr_SetString(get_audioop_state(module)->AudioopError,
402 "Size should be 1, 2, 3 or 4");
403 return 0;
404 }
405 else
406 return 1;
407 }
408
409 static int
audioop_check_parameters(PyObject *module, Py_ssize_t len, int size)
411 {
412 if (!audioop_check_size(module, size))
413 return 0;
414 if (len % size != 0) {
415 PyErr_SetString(get_audioop_state(module)->AudioopError,
416 "not a whole number of frames");
417 return 0;
418 }
419 return 1;
420 }
421
422 /*[clinic input]
423 module audioop
424 [clinic start generated code]*/
425 /*[clinic end generated code: output=da39a3ee5e6b4b0d input=8fa8f6611be3591a]*/
426
427 /*[clinic input]
428 audioop.getsample
429
430 fragment: Py_buffer
431 width: int
432 index: Py_ssize_t
433 /
434
435 Return the value of sample index from the fragment.
436 [clinic start generated code]*/
437
438 static PyObject *
audioop_getsample_impl(PyObject *module, Py_buffer *fragment, int width,
440 Py_ssize_t index)
441 /*[clinic end generated code: output=8fe1b1775134f39a input=88edbe2871393549]*/
442 {
443 int val;
444
445 if (!audioop_check_parameters(module, fragment->len, width))
446 return NULL;
447 if (index < 0 || index >= fragment->len/width) {
448 PyErr_SetString(get_audioop_state(module)->AudioopError,
449 "Index out of range");
450 return NULL;
451 }
452 val = GETRAWSAMPLE(width, fragment->buf, index*width);
453 return PyLong_FromLong(val);
454 }
455
456 /*[clinic input]
457 audioop.max
458
459 fragment: Py_buffer
460 width: int
461 /
462
463 Return the maximum of the absolute value of all samples in a fragment.
464 [clinic start generated code]*/
465
466 static PyObject *
audioop_max_impl(PyObject *module, Py_buffer *fragment, int width)
468 /*[clinic end generated code: output=e6c5952714f1c3f0 input=32bea5ea0ac8c223]*/
469 {
470 Py_ssize_t i;
471 unsigned int absval, max = 0;
472
473 if (!audioop_check_parameters(module, fragment->len, width))
474 return NULL;
475 for (i = 0; i < fragment->len; i += width) {
476 int val = GETRAWSAMPLE(width, fragment->buf, i);
477 /* Cast to unsigned before negating. Unsigned overflow is well-
478 defined, but signed overflow is not. */
479 if (val < 0) absval = (unsigned int)-(int64_t)val;
480 else absval = val;
481 if (absval > max) max = absval;
482 }
483 return PyLong_FromUnsignedLong(max);
484 }
485
486 /*[clinic input]
487 audioop.minmax
488
489 fragment: Py_buffer
490 width: int
491 /
492
493 Return the minimum and maximum values of all samples in the sound fragment.
494 [clinic start generated code]*/
495
496 static PyObject *
audioop_minmax_impl(PyObject *module, Py_buffer *fragment, int width)
498 /*[clinic end generated code: output=473fda66b15c836e input=89848e9b927a0696]*/
499 {
500 Py_ssize_t i;
501 /* -1 trick below is needed on Windows to support -0x80000000 without
502 a warning */
503 int min = 0x7fffffff, max = -0x7FFFFFFF-1;
504
505 if (!audioop_check_parameters(module, fragment->len, width))
506 return NULL;
507 for (i = 0; i < fragment->len; i += width) {
508 int val = GETRAWSAMPLE(width, fragment->buf, i);
509 if (val > max) max = val;
510 if (val < min) min = val;
511 }
512 return Py_BuildValue("(ii)", min, max);
513 }
514
515 /*[clinic input]
516 audioop.avg
517
518 fragment: Py_buffer
519 width: int
520 /
521
522 Return the average over all samples in the fragment.
523 [clinic start generated code]*/
524
525 static PyObject *
audioop_avg_impl(PyObject *module, Py_buffer *fragment, int width)
527 /*[clinic end generated code: output=4410a4c12c3586e6 input=1114493c7611334d]*/
528 {
529 Py_ssize_t i;
530 int avg;
531 double sum = 0.0;
532
533 if (!audioop_check_parameters(module, fragment->len, width))
534 return NULL;
535 for (i = 0; i < fragment->len; i += width)
536 sum += GETRAWSAMPLE(width, fragment->buf, i);
537 if (fragment->len == 0)
538 avg = 0;
539 else
540 avg = (int)floor(sum / (double)(fragment->len/width));
541 return PyLong_FromLong(avg);
542 }
543
544 /*[clinic input]
545 audioop.rms
546
547 fragment: Py_buffer
548 width: int
549 /
550
551 Return the root-mean-square of the fragment, i.e. sqrt(sum(S_i^2)/n).
552 [clinic start generated code]*/
553
554 static PyObject *
audioop_rms_impl(PyObject *module, Py_buffer *fragment, int width)
556 /*[clinic end generated code: output=1e7871c826445698 input=4cc57c6c94219d78]*/
557 {
558 Py_ssize_t i;
559 unsigned int res;
560 double sum_squares = 0.0;
561
562 if (!audioop_check_parameters(module, fragment->len, width))
563 return NULL;
564 for (i = 0; i < fragment->len; i += width) {
565 double val = GETRAWSAMPLE(width, fragment->buf, i);
566 sum_squares += val*val;
567 }
568 if (fragment->len == 0)
569 res = 0;
570 else
571 res = (unsigned int)sqrt(sum_squares / (double)(fragment->len/width));
572 return PyLong_FromUnsignedLong(res);
573 }
574
static double _sum2(const int16_t *a, const int16_t *b, Py_ssize_t len)
576 {
577 Py_ssize_t i;
578 double sum = 0.0;
579
580 for( i=0; i<len; i++) {
581 sum = sum + (double)a[i]*(double)b[i];
582 }
583 return sum;
584 }
585
586 /*
587 ** Findfit tries to locate a sample within another sample. Its main use
588 ** is in echo-cancellation (to find the feedback of the output signal in
589 ** the input signal).
590 ** The method used is as follows:
591 **
592 ** let R be the reference signal (length n) and A the input signal (length N)
593 ** with N > n, and let all sums be over i from 0 to n-1.
594 **
595 ** Now, for each j in {0..N-n} we compute a factor fj so that -fj*R matches A
** as well as possible, i.e. sum( (A[j+i]+fj*R[i])^2 ) is minimal. This
597 ** equation gives fj = sum( A[j+i]R[i] ) / sum(R[i]^2).
598 **
599 ** Next, we compute the relative distance between the original signal and
600 ** the modified signal and minimize that over j:
601 ** vj = sum( (A[j+i]-fj*R[i])^2 ) / sum( A[j+i]^2 ) =>
602 ** vj = ( sum(A[j+i]^2)*sum(R[i]^2) - sum(A[j+i]R[i])^2 ) / sum( A[j+i]^2 )
603 **
604 ** In the code variables correspond as follows:
605 ** cp1 A
606 ** cp2 R
607 ** len1 N
608 ** len2 n
609 ** aj_m1 A[j-1]
610 ** aj_lm1 A[j+n-1]
611 ** sum_ri_2 sum(R[i]^2)
612 ** sum_aij_2 sum(A[i+j]^2)
613 ** sum_aij_ri sum(A[i+j]R[i])
614 **
615 ** sum_ri is calculated once, sum_aij_2 is updated each step and sum_aij_ri
616 ** is completely recalculated each step.
617 */
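/*
** Added note: the running update in the loop below avoids recomputing
** sum_aij_2 from scratch for every shift; it gains the square of the
** sample entering the window (aj_lm1) and loses the square of the sample
** leaving it (aj_m1).  sum_aij_ri, however, still needs a full _sum2()
** pass for each j, as noted above.
*/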
618 /*[clinic input]
619 audioop.findfit
620
621 fragment: Py_buffer
622 reference: Py_buffer
623 /
624
625 Try to match reference as well as possible to a portion of fragment.
626 [clinic start generated code]*/
627
628 static PyObject *
audioop_findfit_impl(PyObject *module, Py_buffer *fragment,
630 Py_buffer *reference)
631 /*[clinic end generated code: output=5752306d83cbbada input=62c305605e183c9a]*/
632 {
633 const int16_t *cp1, *cp2;
634 Py_ssize_t len1, len2;
635 Py_ssize_t j, best_j;
636 double aj_m1, aj_lm1;
637 double sum_ri_2, sum_aij_2, sum_aij_ri, result, best_result, factor;
638
639 if (fragment->len & 1 || reference->len & 1) {
640 PyErr_SetString(get_audioop_state(module)->AudioopError,
641 "Strings should be even-sized");
642 return NULL;
643 }
644 cp1 = (const int16_t *)fragment->buf;
645 len1 = fragment->len >> 1;
646 cp2 = (const int16_t *)reference->buf;
647 len2 = reference->len >> 1;
648
649 if (len1 < len2) {
650 PyErr_SetString(get_audioop_state(module)->AudioopError,
651 "First sample should be longer");
652 return NULL;
653 }
654 sum_ri_2 = _sum2(cp2, cp2, len2);
655 sum_aij_2 = _sum2(cp1, cp1, len2);
656 sum_aij_ri = _sum2(cp1, cp2, len2);
657
658 result = (sum_ri_2*sum_aij_2 - sum_aij_ri*sum_aij_ri) / sum_aij_2;
659
660 best_result = result;
661 best_j = 0;
662
663 for ( j=1; j<=len1-len2; j++) {
664 aj_m1 = (double)cp1[j-1];
665 aj_lm1 = (double)cp1[j+len2-1];
666
667 sum_aij_2 = sum_aij_2 + aj_lm1*aj_lm1 - aj_m1*aj_m1;
668 sum_aij_ri = _sum2(cp1+j, cp2, len2);
669
670 result = (sum_ri_2*sum_aij_2 - sum_aij_ri*sum_aij_ri)
671 / sum_aij_2;
672
673 if ( result < best_result ) {
674 best_result = result;
675 best_j = j;
676 }
677
678 }
679
680 factor = _sum2(cp1+best_j, cp2, len2) / sum_ri_2;
681
682 return Py_BuildValue("(nf)", best_j, factor);
683 }
684
685 /*
686 ** findfactor finds a factor f so that the energy in A-fB is minimal.
687 ** See the comment for findfit for details.
688 */
689 /*[clinic input]
690 audioop.findfactor
691
692 fragment: Py_buffer
693 reference: Py_buffer
694 /
695
696 Return a factor F such that rms(add(fragment, mul(reference, -F))) is minimal.
697 [clinic start generated code]*/
698
699 static PyObject *
audioop_findfactor_impl(PyObject *module, Py_buffer *fragment,
701 Py_buffer *reference)
702 /*[clinic end generated code: output=14ea95652c1afcf8 input=816680301d012b21]*/
703 {
704 const int16_t *cp1, *cp2;
705 Py_ssize_t len;
706 double sum_ri_2, sum_aij_ri, result;
707
708 if (fragment->len & 1 || reference->len & 1) {
709 PyErr_SetString(get_audioop_state(module)->AudioopError,
710 "Strings should be even-sized");
711 return NULL;
712 }
713 if (fragment->len != reference->len) {
714 PyErr_SetString(get_audioop_state(module)->AudioopError,
715 "Samples should be same size");
716 return NULL;
717 }
718 cp1 = (const int16_t *)fragment->buf;
719 cp2 = (const int16_t *)reference->buf;
720 len = fragment->len >> 1;
721 sum_ri_2 = _sum2(cp2, cp2, len);
722 sum_aij_ri = _sum2(cp1, cp2, len);
723
724 result = sum_aij_ri / sum_ri_2;
725
726 return PyFloat_FromDouble(result);
727 }
728
729 /*
730 ** findmax returns the index of the n-sized segment of the input sample
731 ** that contains the most energy.
732 */
733 /*[clinic input]
734 audioop.findmax
735
736 fragment: Py_buffer
737 length: Py_ssize_t
738 /
739
740 Search fragment for a slice of specified number of samples with maximum energy.
741 [clinic start generated code]*/
742
743 static PyObject *
audioop_findmax_impl(PyObject *module, Py_buffer *fragment,
745 Py_ssize_t length)
746 /*[clinic end generated code: output=f008128233523040 input=2f304801ed42383c]*/
747 {
748 const int16_t *cp1;
749 Py_ssize_t len1;
750 Py_ssize_t j, best_j;
751 double aj_m1, aj_lm1;
752 double result, best_result;
753
754 if (fragment->len & 1) {
755 PyErr_SetString(get_audioop_state(module)->AudioopError,
756 "Strings should be even-sized");
757 return NULL;
758 }
759 cp1 = (const int16_t *)fragment->buf;
760 len1 = fragment->len >> 1;
761
762 if (length < 0 || len1 < length) {
763 PyErr_SetString(get_audioop_state(module)->AudioopError,
764 "Input sample should be longer");
765 return NULL;
766 }
767
768 result = _sum2(cp1, cp1, length);
769
770 best_result = result;
771 best_j = 0;
772
773 for ( j=1; j<=len1-length; j++) {
774 aj_m1 = (double)cp1[j-1];
775 aj_lm1 = (double)cp1[j+length-1];
776
777 result = result + aj_lm1*aj_lm1 - aj_m1*aj_m1;
778
779 if ( result > best_result ) {
780 best_result = result;
781 best_j = j;
782 }
783
784 }
785
786 return PyLong_FromSsize_t(best_j);
787 }
788
789 /*[clinic input]
790 audioop.avgpp
791
792 fragment: Py_buffer
793 width: int
794 /
795
796 Return the average peak-peak value over all samples in the fragment.
797 [clinic start generated code]*/
798
799 static PyObject *
audioop_avgpp_impl(PyObject *module, Py_buffer *fragment, int width)
801 /*[clinic end generated code: output=269596b0d5ae0b2b input=0b3cceeae420a7d9]*/
802 {
803 Py_ssize_t i;
804 int prevval, prevextremevalid = 0, prevextreme = 0;
805 double sum = 0.0;
806 unsigned int avg;
807 int diff, prevdiff, nextreme = 0;
808
809 if (!audioop_check_parameters(module, fragment->len, width))
810 return NULL;
811 if (fragment->len <= width)
812 return PyLong_FromLong(0);
813 prevval = GETRAWSAMPLE(width, fragment->buf, 0);
814 prevdiff = 17; /* Anything != 0, 1 */
815 for (i = width; i < fragment->len; i += width) {
816 int val = GETRAWSAMPLE(width, fragment->buf, i);
817 if (val != prevval) {
818 diff = val < prevval;
819 if (prevdiff == !diff) {
820 /* Derivative changed sign. Compute difference to last
821 ** extreme value and remember.
822 */
823 if (prevextremevalid) {
824 if (prevval < prevextreme)
825 sum += (double)((unsigned int)prevextreme -
826 (unsigned int)prevval);
827 else
828 sum += (double)((unsigned int)prevval -
829 (unsigned int)prevextreme);
830 nextreme++;
831 }
832 prevextremevalid = 1;
833 prevextreme = prevval;
834 }
835 prevval = val;
836 prevdiff = diff;
837 }
838 }
839 if ( nextreme == 0 )
840 avg = 0;
841 else
842 avg = (unsigned int)(sum / (double)nextreme);
843 return PyLong_FromUnsignedLong(avg);
844 }
845
846 /*[clinic input]
847 audioop.maxpp
848
849 fragment: Py_buffer
850 width: int
851 /
852
853 Return the maximum peak-peak value in the sound fragment.
854 [clinic start generated code]*/
855
856 static PyObject *
audioop_maxpp_impl(PyObject *module, Py_buffer *fragment, int width)
858 /*[clinic end generated code: output=5b918ed5dbbdb978 input=671a13e1518f80a1]*/
859 {
860 Py_ssize_t i;
861 int prevval, prevextremevalid = 0, prevextreme = 0;
862 unsigned int max = 0, extremediff;
863 int diff, prevdiff;
864
865 if (!audioop_check_parameters(module, fragment->len, width))
866 return NULL;
867 if (fragment->len <= width)
868 return PyLong_FromLong(0);
869 prevval = GETRAWSAMPLE(width, fragment->buf, 0);
870 prevdiff = 17; /* Anything != 0, 1 */
871 for (i = width; i < fragment->len; i += width) {
872 int val = GETRAWSAMPLE(width, fragment->buf, i);
873 if (val != prevval) {
874 diff = val < prevval;
875 if (prevdiff == !diff) {
876 /* Derivative changed sign. Compute difference to
877 ** last extreme value and remember.
878 */
879 if (prevextremevalid) {
880 if (prevval < prevextreme)
881 extremediff = (unsigned int)prevextreme -
882 (unsigned int)prevval;
883 else
884 extremediff = (unsigned int)prevval -
885 (unsigned int)prevextreme;
886 if ( extremediff > max )
887 max = extremediff;
888 }
889 prevextremevalid = 1;
890 prevextreme = prevval;
891 }
892 prevval = val;
893 prevdiff = diff;
894 }
895 }
896 return PyLong_FromUnsignedLong(max);
897 }
898
899 /*[clinic input]
900 audioop.cross
901
902 fragment: Py_buffer
903 width: int
904 /
905
906 Return the number of zero crossings in the fragment passed as an argument.
907 [clinic start generated code]*/
908
909 static PyObject *
audioop_cross_impl(PyObject *module, Py_buffer *fragment, int width)
911 /*[clinic end generated code: output=5938dcdd74a1f431 input=b1b3f15b83f6b41a]*/
912 {
913 Py_ssize_t i;
914 int prevval;
915 Py_ssize_t ncross;
916
917 if (!audioop_check_parameters(module, fragment->len, width))
918 return NULL;
919 ncross = -1;
    prevval = 17; /* Anything != 0, 1 */
921 for (i = 0; i < fragment->len; i += width) {
922 int val = GETRAWSAMPLE(width, fragment->buf, i) < 0;
923 if (val != prevval) ncross++;
924 prevval = val;
925 }
926 return PyLong_FromSsize_t(ncross);
927 }
928
929 /*[clinic input]
930 audioop.mul
931
932 fragment: Py_buffer
933 width: int
934 factor: double
935 /
936
937 Return a fragment that has all samples in the original fragment multiplied by the floating-point value factor.
938 [clinic start generated code]*/
939
940 static PyObject *
audioop_mul_impl(PyObject *module, Py_buffer *fragment, int width,
942 double factor)
943 /*[clinic end generated code: output=6cd48fe796da0ea4 input=c726667baa157d3c]*/
944 {
945 signed char *ncp;
946 Py_ssize_t i;
947 double maxval, minval;
948 PyObject *rv;
949
950 if (!audioop_check_parameters(module, fragment->len, width))
951 return NULL;
952
953 maxval = (double) maxvals[width];
954 minval = (double) minvals[width];
955
956 rv = PyBytes_FromStringAndSize(NULL, fragment->len);
957 if (rv == NULL)
958 return NULL;
959 ncp = (signed char *)PyBytes_AsString(rv);
960
961 for (i = 0; i < fragment->len; i += width) {
962 double val = GETRAWSAMPLE(width, fragment->buf, i);
963 int ival = fbound(val * factor, minval, maxval);
964 SETRAWSAMPLE(width, ncp, i, ival);
965 }
966 return rv;
967 }
968
969 /*[clinic input]
970 audioop.tomono
971
972 fragment: Py_buffer
973 width: int
974 lfactor: double
975 rfactor: double
976 /
977
978 Convert a stereo fragment to a mono fragment.
979 [clinic start generated code]*/
980
981 static PyObject *
audioop_tomono_impl(PyObject *module, Py_buffer *fragment, int width,
983 double lfactor, double rfactor)
984 /*[clinic end generated code: output=235c8277216d4e4e input=c4ec949b3f4dddfa]*/
985 {
986 signed char *cp, *ncp;
987 Py_ssize_t len, i;
988 double maxval, minval;
989 PyObject *rv;
990
991 cp = fragment->buf;
992 len = fragment->len;
993 if (!audioop_check_parameters(module, len, width))
994 return NULL;
995 if (((len / width) & 1) != 0) {
996 PyErr_SetString(get_audioop_state(module)->AudioopError,
997 "not a whole number of frames");
998 return NULL;
999 }
1000
1001 maxval = (double) maxvals[width];
1002 minval = (double) minvals[width];
1003
1004 rv = PyBytes_FromStringAndSize(NULL, len/2);
1005 if (rv == NULL)
1006 return NULL;
1007 ncp = (signed char *)PyBytes_AsString(rv);
1008
1009 for (i = 0; i < len; i += width*2) {
1010 double val1 = GETRAWSAMPLE(width, cp, i);
1011 double val2 = GETRAWSAMPLE(width, cp, i + width);
1012 double val = val1 * lfactor + val2 * rfactor;
1013 int ival = fbound(val, minval, maxval);
1014 SETRAWSAMPLE(width, ncp, i/2, ival);
1015 }
1016 return rv;
1017 }
1018
1019 /*[clinic input]
1020 audioop.tostereo
1021
1022 fragment: Py_buffer
1023 width: int
1024 lfactor: double
1025 rfactor: double
1026 /
1027
1028 Generate a stereo fragment from a mono fragment.
1029 [clinic start generated code]*/
1030
1031 static PyObject *
audioop_tostereo_impl(PyObject *module, Py_buffer *fragment, int width,
1033 double lfactor, double rfactor)
1034 /*[clinic end generated code: output=046f13defa5f1595 input=27b6395ebfdff37a]*/
1035 {
1036 signed char *ncp;
1037 Py_ssize_t i;
1038 double maxval, minval;
1039 PyObject *rv;
1040
1041 if (!audioop_check_parameters(module, fragment->len, width))
1042 return NULL;
1043
1044 maxval = (double) maxvals[width];
1045 minval = (double) minvals[width];
1046
1047 if (fragment->len > PY_SSIZE_T_MAX/2) {
1048 PyErr_SetString(PyExc_MemoryError,
1049 "not enough memory for output buffer");
1050 return NULL;
1051 }
1052
1053 rv = PyBytes_FromStringAndSize(NULL, fragment->len*2);
1054 if (rv == NULL)
1055 return NULL;
1056 ncp = (signed char *)PyBytes_AsString(rv);
1057
1058 for (i = 0; i < fragment->len; i += width) {
1059 double val = GETRAWSAMPLE(width, fragment->buf, i);
1060 int val1 = fbound(val * lfactor, minval, maxval);
1061 int val2 = fbound(val * rfactor, minval, maxval);
1062 SETRAWSAMPLE(width, ncp, i*2, val1);
1063 SETRAWSAMPLE(width, ncp, i*2 + width, val2);
1064 }
1065 return rv;
1066 }
1067
1068 /*[clinic input]
1069 audioop.add
1070
1071 fragment1: Py_buffer
1072 fragment2: Py_buffer
1073 width: int
1074 /
1075
1076 Return a fragment which is the addition of the two samples passed as parameters.
1077 [clinic start generated code]*/
1078
1079 static PyObject *
audioop_add_impl(PyObject *module, Py_buffer *fragment1,
1081 Py_buffer *fragment2, int width)
1082 /*[clinic end generated code: output=60140af4d1aab6f2 input=4a8d4bae4c1605c7]*/
1083 {
1084 signed char *ncp;
1085 Py_ssize_t i;
1086 int minval, maxval, newval;
1087 PyObject *rv;
1088
1089 if (!audioop_check_parameters(module, fragment1->len, width))
1090 return NULL;
1091 if (fragment1->len != fragment2->len) {
1092 PyErr_SetString(get_audioop_state(module)->AudioopError,
1093 "Lengths should be the same");
1094 return NULL;
1095 }
1096
1097 maxval = maxvals[width];
1098 minval = minvals[width];
1099
1100 rv = PyBytes_FromStringAndSize(NULL, fragment1->len);
1101 if (rv == NULL)
1102 return NULL;
1103 ncp = (signed char *)PyBytes_AsString(rv);
1104
1105 for (i = 0; i < fragment1->len; i += width) {
1106 int val1 = GETRAWSAMPLE(width, fragment1->buf, i);
1107 int val2 = GETRAWSAMPLE(width, fragment2->buf, i);
1108
1109 if (width < 4) {
1110 newval = val1 + val2;
1111 /* truncate in case of overflow */
1112 if (newval > maxval)
1113 newval = maxval;
1114 else if (newval < minval)
1115 newval = minval;
1116 }
1117 else {
1118 double fval = (double)val1 + (double)val2;
1119 /* truncate in case of overflow */
1120 newval = fbound(fval, minval, maxval);
1121 }
1122
1123 SETRAWSAMPLE(width, ncp, i, newval);
1124 }
1125 return rv;
1126 }
1127
1128 /*[clinic input]
1129 audioop.bias
1130
1131 fragment: Py_buffer
1132 width: int
1133 bias: int
1134 /
1135
1136 Return a fragment that is the original fragment with a bias added to each sample.
1137 [clinic start generated code]*/
1138
1139 static PyObject *
audioop_bias_impl(PyObject *module, Py_buffer *fragment, int width, int bias)
1141 /*[clinic end generated code: output=6e0aa8f68f045093 input=2b5cce5c3bb4838c]*/
1142 {
1143 signed char *ncp;
1144 Py_ssize_t i;
1145 unsigned int val = 0, mask;
1146 PyObject *rv;
1147
1148 if (!audioop_check_parameters(module, fragment->len, width))
1149 return NULL;
1150
1151 rv = PyBytes_FromStringAndSize(NULL, fragment->len);
1152 if (rv == NULL)
1153 return NULL;
1154 ncp = (signed char *)PyBytes_AsString(rv);
1155
1156 mask = masks[width];
1157
1158 for (i = 0; i < fragment->len; i += width) {
1159 if (width == 1)
1160 val = GETINTX(unsigned char, fragment->buf, i);
1161 else if (width == 2)
1162 val = GETINTX(uint16_t, fragment->buf, i);
1163 else if (width == 3)
1164 val = ((unsigned int)GETINT24(fragment->buf, i)) & 0xffffffu;
1165 else {
1166 assert(width == 4);
1167 val = GETINTX(uint32_t, fragment->buf, i);
1168 }
1169
1170 val += (unsigned int)bias;
1171 /* wrap around in case of overflow */
1172 val &= mask;
1173
1174 if (width == 1)
1175 SETINTX(unsigned char, ncp, i, val);
1176 else if (width == 2)
1177 SETINTX(uint16_t, ncp, i, val);
1178 else if (width == 3)
1179 SETINT24(ncp, i, (int)val);
1180 else {
1181 assert(width == 4);
1182 SETINTX(uint32_t, ncp, i, val);
1183 }
1184 }
1185 return rv;
1186 }
1187
1188 /*[clinic input]
1189 audioop.reverse
1190
1191 fragment: Py_buffer
1192 width: int
1193 /
1194
1195 Reverse the samples in a fragment and returns the modified fragment.
1196 [clinic start generated code]*/
1197
1198 static PyObject *
audioop_reverse_impl(PyObject *module, Py_buffer *fragment, int width)
1200 /*[clinic end generated code: output=b44135698418da14 input=668f890cf9f9d225]*/
1201 {
1202 unsigned char *ncp;
1203 Py_ssize_t i;
1204 PyObject *rv;
1205
1206 if (!audioop_check_parameters(module, fragment->len, width))
1207 return NULL;
1208
1209 rv = PyBytes_FromStringAndSize(NULL, fragment->len);
1210 if (rv == NULL)
1211 return NULL;
1212 ncp = (unsigned char *)PyBytes_AsString(rv);
1213
1214 for (i = 0; i < fragment->len; i += width) {
1215 int val = GETRAWSAMPLE(width, fragment->buf, i);
1216 SETRAWSAMPLE(width, ncp, fragment->len - i - width, val);
1217 }
1218 return rv;
1219 }
1220
1221 /*[clinic input]
1222 audioop.byteswap
1223
1224 fragment: Py_buffer
1225 width: int
1226 /
1227
1228 Convert big-endian samples to little-endian and vice versa.
1229 [clinic start generated code]*/
1230
1231 static PyObject *
audioop_byteswap_impl(PyObject *module, Py_buffer *fragment, int width)
1233 /*[clinic end generated code: output=50838a9e4b87cd4d input=fae7611ceffa5c82]*/
1234 {
1235 unsigned char *ncp;
1236 Py_ssize_t i;
1237 PyObject *rv;
1238
1239 if (!audioop_check_parameters(module, fragment->len, width))
1240 return NULL;
1241
1242 rv = PyBytes_FromStringAndSize(NULL, fragment->len);
1243 if (rv == NULL)
1244 return NULL;
1245 ncp = (unsigned char *)PyBytes_AsString(rv);
1246
1247 for (i = 0; i < fragment->len; i += width) {
1248 int j;
1249 for (j = 0; j < width; j++)
1250 ncp[i + width - 1 - j] = ((unsigned char *)fragment->buf)[i + j];
1251 }
1252 return rv;
1253 }
1254
1255 /*[clinic input]
1256 audioop.lin2lin
1257
1258 fragment: Py_buffer
1259 width: int
1260 newwidth: int
1261 /
1262
1263 Convert samples between 1-, 2-, 3- and 4-byte formats.
1264 [clinic start generated code]*/
1265
1266 static PyObject *
audioop_lin2lin_impl(PyObject *module, Py_buffer *fragment, int width,
1268 int newwidth)
1269 /*[clinic end generated code: output=17b14109248f1d99 input=5ce08c8aa2f24d96]*/
1270 {
1271 unsigned char *ncp;
1272 Py_ssize_t i, j;
1273 PyObject *rv;
1274
1275 if (!audioop_check_parameters(module, fragment->len, width))
1276 return NULL;
1277 if (!audioop_check_size(module, newwidth))
1278 return NULL;
1279
1280 if (fragment->len/width > PY_SSIZE_T_MAX/newwidth) {
1281 PyErr_SetString(PyExc_MemoryError,
1282 "not enough memory for output buffer");
1283 return NULL;
1284 }
1285 rv = PyBytes_FromStringAndSize(NULL, (fragment->len/width)*newwidth);
1286 if (rv == NULL)
1287 return NULL;
1288 ncp = (unsigned char *)PyBytes_AsString(rv);
1289
1290 for (i = j = 0; i < fragment->len; i += width, j += newwidth) {
1291 int val = GETSAMPLE32(width, fragment->buf, i);
1292 SETSAMPLE32(newwidth, ncp, j, val);
1293 }
1294 return rv;
1295 }
1296
1297 static int
gcd(int a, int b)
1299 {
1300 while (b > 0) {
1301 int tmp = a % b;
1302 a = b;
1303 b = tmp;
1304 }
1305 return a;
1306 }
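/* Example (illustrative): ratecv() uses gcd() to reduce the rate pair, so
   a 44100 Hz -> 8000 Hz conversion runs with inrate = 441 and outrate = 80
   (gcd(44100, 8000) == 100), which keeps the d counter small. */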
1307
1308 /*[clinic input]
1309 audioop.ratecv
1310
1311 fragment: Py_buffer
1312 width: int
1313 nchannels: int
1314 inrate: int
1315 outrate: int
1316 state: object
1317 weightA: int = 1
1318 weightB: int = 0
1319 /
1320
1321 Convert the frame rate of the input fragment.
1322 [clinic start generated code]*/
1323
1324 static PyObject *
audioop_ratecv_impl(PyObject *module, Py_buffer *fragment, int width,
1326 int nchannels, int inrate, int outrate, PyObject *state,
1327 int weightA, int weightB)
1328 /*[clinic end generated code: output=624038e843243139 input=aff3acdc94476191]*/
1329 {
1330 char *cp, *ncp;
1331 Py_ssize_t len;
1332 int chan, d, *prev_i, *cur_i, cur_o;
1333 PyObject *samps, *str, *rv = NULL, *channel;
1334 int bytes_per_frame;
1335
1336 if (!audioop_check_size(module, width))
1337 return NULL;
1338 if (nchannels < 1) {
1339 PyErr_SetString(get_audioop_state(module)->AudioopError,
1340 "# of channels should be >= 1");
1341 return NULL;
1342 }
1343 if (width > INT_MAX / nchannels) {
1344 /* This overflow test is rigorously correct because
1345 both multiplicands are >= 1. Use the argument names
1346 from the docs for the error msg. */
1347 PyErr_SetString(PyExc_OverflowError,
1348 "width * nchannels too big for a C int");
1349 return NULL;
1350 }
1351 bytes_per_frame = width * nchannels;
1352 if (weightA < 1 || weightB < 0) {
1353 PyErr_SetString(get_audioop_state(module)->AudioopError,
1354 "weightA should be >= 1, weightB should be >= 0");
1355 return NULL;
1356 }
1357 assert(fragment->len >= 0);
1358 if (fragment->len % bytes_per_frame != 0) {
1359 PyErr_SetString(get_audioop_state(module)->AudioopError,
1360 "not a whole number of frames");
1361 return NULL;
1362 }
1363 if (inrate <= 0 || outrate <= 0) {
1364 PyErr_SetString(get_audioop_state(module)->AudioopError,
1365 "sampling rate not > 0");
1366 return NULL;
1367 }
1368 /* divide inrate and outrate by their greatest common divisor */
1369 d = gcd(inrate, outrate);
1370 inrate /= d;
1371 outrate /= d;
1372 /* divide weightA and weightB by their greatest common divisor */
1373 d = gcd(weightA, weightB);
1374 weightA /= d;
1375 weightB /= d;
1376
1377 if ((size_t)nchannels > SIZE_MAX/sizeof(int)) {
1378 PyErr_SetString(PyExc_MemoryError,
1379 "not enough memory for output buffer");
1380 return NULL;
1381 }
1382 prev_i = (int *) PyMem_Malloc(nchannels * sizeof(int));
1383 cur_i = (int *) PyMem_Malloc(nchannels * sizeof(int));
1384 if (prev_i == NULL || cur_i == NULL) {
1385 (void) PyErr_NoMemory();
1386 goto exit;
1387 }
1388
1389 len = fragment->len / bytes_per_frame; /* # of frames */
1390
1391 if (state == Py_None) {
1392 d = -outrate;
1393 for (chan = 0; chan < nchannels; chan++)
1394 prev_i[chan] = cur_i[chan] = 0;
1395 }
1396 else {
1397 if (!PyTuple_Check(state)) {
1398 PyErr_SetString(PyExc_TypeError, "state must be a tuple or None");
1399 goto exit;
1400 }
1401 if (!PyArg_ParseTuple(state,
1402 "iO!;ratecv(): illegal state argument",
1403 &d, &PyTuple_Type, &samps))
1404 goto exit;
1405 if (PyTuple_Size(samps) != nchannels) {
1406 PyErr_SetString(get_audioop_state(module)->AudioopError,
1407 "illegal state argument");
1408 goto exit;
1409 }
1410 for (chan = 0; chan < nchannels; chan++) {
1411 channel = PyTuple_GetItem(samps, chan);
1412 if (!PyTuple_Check(channel)) {
1413 PyErr_SetString(PyExc_TypeError,
1414 "ratecv(): illegal state argument");
1415 goto exit;
1416 }
1417 if (!PyArg_ParseTuple(channel,
1418 "ii;ratecv(): illegal state argument",
1419 &prev_i[chan], &cur_i[chan]))
1420 {
1421 goto exit;
1422 }
1423 }
1424 }
1425
1426 /* str <- Space for the output buffer. */
1427 if (len == 0)
1428 str = PyBytes_FromStringAndSize(NULL, 0);
1429 else {
1430 /* There are len input frames, so we need (mathematically)
1431 ceiling(len*outrate/inrate) output frames, and each frame
1432 requires bytes_per_frame bytes. Computing this
1433 without spurious overflow is the challenge; we can
1434 settle for a reasonable upper bound, though, in this
1435 case ceiling(len/inrate) * outrate. */
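        /* Worked example (illustrative): with len = 1000 frames and the
           reduced rates inrate = 441, outrate = 80, the exact need is
           ceiling(1000*80/441) = 182 frames, while the bound used here is
           q = 1 + (999/441) = 3, giving 3*80 = 240 frames: larger, but
           free of intermediate overflow. */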
1436
1437 /* compute ceiling(len/inrate) without overflow */
1438 Py_ssize_t q = 1 + (len - 1) / inrate;
1439 if (outrate > PY_SSIZE_T_MAX / q / bytes_per_frame)
1440 str = NULL;
1441 else
1442 str = PyBytes_FromStringAndSize(NULL,
1443 q * outrate * bytes_per_frame);
1444 }
1445 if (str == NULL) {
1446 PyErr_SetString(PyExc_MemoryError,
1447 "not enough memory for output buffer");
1448 goto exit;
1449 }
1450 ncp = PyBytes_AsString(str);
1451 cp = fragment->buf;
1452
1453 for (;;) {
1454 while (d < 0) {
1455 if (len == 0) {
1456 samps = PyTuple_New(nchannels);
1457 if (samps == NULL)
1458 goto exit;
1459 for (chan = 0; chan < nchannels; chan++)
1460 PyTuple_SetItem(samps, chan,
1461 Py_BuildValue("(ii)",
1462 prev_i[chan],
1463 cur_i[chan]));
1464 if (PyErr_Occurred())
1465 goto exit;
1466 /* We have checked before that the length
1467 * of the string fits into int. */
1468 len = (Py_ssize_t)(ncp - PyBytes_AsString(str));
1469 rv = PyBytes_FromStringAndSize
1470 (PyBytes_AsString(str), len);
1471 Py_DECREF(str);
1472 str = rv;
1473 if (str == NULL)
1474 goto exit;
1475 rv = Py_BuildValue("(O(iO))", str, d, samps);
1476 Py_DECREF(samps);
1477 Py_DECREF(str);
1478 goto exit; /* return rv */
1479 }
1480 for (chan = 0; chan < nchannels; chan++) {
1481 prev_i[chan] = cur_i[chan];
1482 cur_i[chan] = GETSAMPLE32(width, cp, 0);
1483 cp += width;
1484 /* implements a simple digital filter */
1485 cur_i[chan] = (int)(
1486 ((double)weightA * (double)cur_i[chan] +
1487 (double)weightB * (double)prev_i[chan]) /
1488 ((double)weightA + (double)weightB));
1489 }
1490 len--;
1491 d += outrate;
1492 }
1493 while (d >= 0) {
1494 for (chan = 0; chan < nchannels; chan++) {
1495 cur_o = (int)(((double)prev_i[chan] * (double)d +
1496 (double)cur_i[chan] * (double)(outrate - d)) /
1497 (double)outrate);
1498 SETSAMPLE32(width, ncp, 0, cur_o);
1499 ncp += width;
1500 }
1501 d -= inrate;
1502 }
1503 }
1504 exit:
1505 PyMem_Free(prev_i);
1506 PyMem_Free(cur_i);
1507 return rv;
1508 }
1509
1510 /*[clinic input]
1511 audioop.lin2ulaw
1512
1513 fragment: Py_buffer
1514 width: int
1515 /
1516
1517 Convert samples in the audio fragment to u-LAW encoding.
1518 [clinic start generated code]*/
1519
1520 static PyObject *
audioop_lin2ulaw_impl(PyObject *module, Py_buffer *fragment, int width)
1522 /*[clinic end generated code: output=14fb62b16fe8ea8e input=2450d1b870b6bac2]*/
1523 {
1524 unsigned char *ncp;
1525 Py_ssize_t i;
1526 PyObject *rv;
1527
1528 if (!audioop_check_parameters(module, fragment->len, width))
1529 return NULL;
1530
1531 rv = PyBytes_FromStringAndSize(NULL, fragment->len/width);
1532 if (rv == NULL)
1533 return NULL;
1534 ncp = (unsigned char *)PyBytes_AsString(rv);
1535
1536 for (i = 0; i < fragment->len; i += width) {
1537 int val = GETSAMPLE32(width, fragment->buf, i);
1538 *ncp++ = st_14linear2ulaw(val >> 18);
1539 }
1540 return rv;
1541 }
1542
1543 /*[clinic input]
1544 audioop.ulaw2lin
1545
1546 fragment: Py_buffer
1547 width: int
1548 /
1549
1550 Convert sound fragments in u-LAW encoding to linearly encoded sound fragments.
1551 [clinic start generated code]*/
1552
1553 static PyObject *
audioop_ulaw2lin_impl(PyObject *module, Py_buffer *fragment, int width)
1555 /*[clinic end generated code: output=378356b047521ba2 input=45d53ddce5be7d06]*/
1556 {
1557 unsigned char *cp;
1558 signed char *ncp;
1559 Py_ssize_t i;
1560 PyObject *rv;
1561
1562 if (!audioop_check_size(module, width))
1563 return NULL;
1564
1565 if (fragment->len > PY_SSIZE_T_MAX/width) {
1566 PyErr_SetString(PyExc_MemoryError,
1567 "not enough memory for output buffer");
1568 return NULL;
1569 }
1570 rv = PyBytes_FromStringAndSize(NULL, fragment->len*width);
1571 if (rv == NULL)
1572 return NULL;
1573 ncp = (signed char *)PyBytes_AsString(rv);
1574
1575 cp = fragment->buf;
1576 for (i = 0; i < fragment->len*width; i += width) {
1577 int val = st_ulaw2linear16(*cp++) << 16;
1578 SETSAMPLE32(width, ncp, i, val);
1579 }
1580 return rv;
1581 }
1582
1583 /*[clinic input]
1584 audioop.lin2alaw
1585
1586 fragment: Py_buffer
1587 width: int
1588 /
1589
1590 Convert samples in the audio fragment to a-LAW encoding.
1591 [clinic start generated code]*/
1592
1593 static PyObject *
audioop_lin2alaw_impl(PyObject *module, Py_buffer *fragment, int width)
1595 /*[clinic end generated code: output=d076f130121a82f0 input=ffb1ef8bb39da945]*/
1596 {
1597 unsigned char *ncp;
1598 Py_ssize_t i;
1599 PyObject *rv;
1600
1601 if (!audioop_check_parameters(module, fragment->len, width))
1602 return NULL;
1603
1604 rv = PyBytes_FromStringAndSize(NULL, fragment->len/width);
1605 if (rv == NULL)
1606 return NULL;
1607 ncp = (unsigned char *)PyBytes_AsString(rv);
1608
1609 for (i = 0; i < fragment->len; i += width) {
1610 int val = GETSAMPLE32(width, fragment->buf, i);
1611 *ncp++ = st_linear2alaw(val >> 19);
1612 }
1613 return rv;
1614 }
1615
1616 /*[clinic input]
1617 audioop.alaw2lin
1618
1619 fragment: Py_buffer
1620 width: int
1621 /
1622
1623 Convert sound fragments in a-LAW encoding to linearly encoded sound fragments.
1624 [clinic start generated code]*/
1625
1626 static PyObject *
audioop_alaw2lin_impl(PyObject *module, Py_buffer *fragment, int width)
1628 /*[clinic end generated code: output=85c365ec559df647 input=4140626046cd1772]*/
1629 {
1630 unsigned char *cp;
1631 signed char *ncp;
1632 Py_ssize_t i;
1633 int val;
1634 PyObject *rv;
1635
1636 if (!audioop_check_size(module, width))
1637 return NULL;
1638
1639 if (fragment->len > PY_SSIZE_T_MAX/width) {
1640 PyErr_SetString(PyExc_MemoryError,
1641 "not enough memory for output buffer");
1642 return NULL;
1643 }
1644 rv = PyBytes_FromStringAndSize(NULL, fragment->len*width);
1645 if (rv == NULL)
1646 return NULL;
1647 ncp = (signed char *)PyBytes_AsString(rv);
1648 cp = fragment->buf;
1649
1650 for (i = 0; i < fragment->len*width; i += width) {
1651 val = st_alaw2linear16(*cp++) << 16;
1652 SETSAMPLE32(width, ncp, i, val);
1653 }
1654 return rv;
1655 }
1656
1657 /*[clinic input]
1658 audioop.lin2adpcm
1659
1660 fragment: Py_buffer
1661 width: int
1662 state: object
1663 /
1664
1665 Convert samples to 4 bit Intel/DVI ADPCM encoding.
1666 [clinic start generated code]*/
1667
1668 static PyObject *
audioop_lin2adpcm_impl(PyObject *module, Py_buffer *fragment, int width,
1670 PyObject *state)
1671 /*[clinic end generated code: output=cc19f159f16c6793 input=12919d549b90c90a]*/
1672 {
1673 signed char *ncp;
1674 Py_ssize_t i;
1675 int step, valpred, delta,
1676 index, sign, vpdiff, diff;
1677 PyObject *rv = NULL, *str;
1678 int outputbuffer = 0, bufferstep;
1679
1680 if (!audioop_check_parameters(module, fragment->len, width))
1681 return NULL;
1682
    /* Decode state, should have (value, index) */
1684 if ( state == Py_None ) {
1685 /* First time, it seems. Set defaults */
1686 valpred = 0;
1687 index = 0;
1688 }
1689 else if (!PyTuple_Check(state)) {
1690 PyErr_SetString(PyExc_TypeError, "state must be a tuple or None");
1691 return NULL;
1692 }
1693 else if (!PyArg_ParseTuple(state, "ii;lin2adpcm(): illegal state argument",
1694 &valpred, &index))
1695 {
1696 return NULL;
1697 }
1698 else if (valpred >= 0x8000 || valpred < -0x8000 ||
1699 (size_t)index >= Py_ARRAY_LENGTH(stepsizeTable)) {
1700 PyErr_SetString(PyExc_ValueError, "bad state");
1701 return NULL;
1702 }
1703
1704 str = PyBytes_FromStringAndSize(NULL, fragment->len/(width*2));
1705 if (str == NULL)
1706 return NULL;
1707 ncp = (signed char *)PyBytes_AsString(str);
1708
1709 step = stepsizeTable[index];
1710 bufferstep = 1;
1711
1712 for (i = 0; i < fragment->len; i += width) {
1713 int val = GETSAMPLE32(width, fragment->buf, i) >> 16;
1714
1715 /* Step 1 - compute difference with previous value */
1716 if (val < valpred) {
1717 diff = valpred - val;
1718 sign = 8;
1719 }
1720 else {
1721 diff = val - valpred;
1722 sign = 0;
1723 }
1724
1725 /* Step 2 - Divide and clamp */
1726 /* Note:
1727 ** This code *approximately* computes:
1728 ** delta = diff*4/step;
1729 ** vpdiff = (delta+0.5)*step/4;
1730 ** but in shift step bits are dropped. The net result of this
1731 ** is that even if you have fast mul/div hardware you cannot
1732 ** put it to good use since the fixup would be too expensive.
1733 */
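        /* Worked example (illustrative): with step = 32 and diff = 100 the
           three tests below produce delta = 7 and vpdiff = 60, matching the
           idealised formulas: min(diff*4/step, 7) = 7 and
           (delta + 0.5)*step/4 = 7.5*32/4 = 60. */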
1734 delta = 0;
1735 vpdiff = (step >> 3);
1736
1737 if ( diff >= step ) {
1738 delta = 4;
1739 diff -= step;
1740 vpdiff += step;
1741 }
1742 step >>= 1;
1743 if ( diff >= step ) {
1744 delta |= 2;
1745 diff -= step;
1746 vpdiff += step;
1747 }
1748 step >>= 1;
1749 if ( diff >= step ) {
1750 delta |= 1;
1751 vpdiff += step;
1752 }
1753
1754 /* Step 3 - Update previous value */
1755 if ( sign )
1756 valpred -= vpdiff;
1757 else
1758 valpred += vpdiff;
1759
1760 /* Step 4 - Clamp previous value to 16 bits */
1761 if ( valpred > 32767 )
1762 valpred = 32767;
1763 else if ( valpred < -32768 )
1764 valpred = -32768;
1765
1766 /* Step 5 - Assemble value, update index and step values */
1767 delta |= sign;
1768
1769 index += indexTable[delta];
1770 if ( index < 0 ) index = 0;
1771 if ( index > 88 ) index = 88;
1772 step = stepsizeTable[index];
1773
1774 /* Step 6 - Output value */
1775 if ( bufferstep ) {
1776 outputbuffer = (delta << 4) & 0xf0;
1777 } else {
1778 *ncp++ = (delta & 0x0f) | outputbuffer;
1779 }
1780 bufferstep = !bufferstep;
1781 }
1782 rv = Py_BuildValue("(O(ii))", str, valpred, index);
1783 Py_DECREF(str);
1784 return rv;
1785 }
1786
1787 /*[clinic input]
1788 audioop.adpcm2lin
1789
1790 fragment: Py_buffer
1791 width: int
1792 state: object
1793 /
1794
1795 Decode an Intel/DVI ADPCM coded fragment to a linear fragment.
1796 [clinic start generated code]*/
1797
1798 static PyObject *
audioop_adpcm2lin_impl(PyObject *module, Py_buffer *fragment, int width,
1800 PyObject *state)
1801 /*[clinic end generated code: output=3440ea105acb3456 input=f5221144f5ca9ef0]*/
1802 {
1803 signed char *cp;
1804 signed char *ncp;
1805 Py_ssize_t i, outlen;
1806 int valpred, step, delta, index, sign, vpdiff;
1807 PyObject *rv, *str;
1808 int inputbuffer = 0, bufferstep;
1809
1810 if (!audioop_check_size(module, width))
1811 return NULL;
1812
    /* Decode state, should have (value, index) */
1814 if ( state == Py_None ) {
1815 /* First time, it seems. Set defaults */
1816 valpred = 0;
1817 index = 0;
1818 }
1819 else if (!PyTuple_Check(state)) {
1820 PyErr_SetString(PyExc_TypeError, "state must be a tuple or None");
1821 return NULL;
1822 }
1823 else if (!PyArg_ParseTuple(state, "ii;adpcm2lin(): illegal state argument",
1824 &valpred, &index))
1825 {
1826 return NULL;
1827 }
1828 else if (valpred >= 0x8000 || valpred < -0x8000 ||
1829 (size_t)index >= Py_ARRAY_LENGTH(stepsizeTable)) {
1830 PyErr_SetString(PyExc_ValueError, "bad state");
1831 return NULL;
1832 }
1833
1834 if (fragment->len > (PY_SSIZE_T_MAX/2)/width) {
1835 PyErr_SetString(PyExc_MemoryError,
1836 "not enough memory for output buffer");
1837 return NULL;
1838 }
1839 outlen = fragment->len*width*2;
1840 str = PyBytes_FromStringAndSize(NULL, outlen);
1841 if (str == NULL)
1842 return NULL;
1843 ncp = (signed char *)PyBytes_AsString(str);
1844 cp = fragment->buf;
1845
1846 step = stepsizeTable[index];
1847 bufferstep = 0;
1848
1849 for (i = 0; i < outlen; i += width) {
1850 /* Step 1 - get the delta value and compute next index */
1851 if ( bufferstep ) {
1852 delta = inputbuffer & 0xf;
1853 } else {
1854 inputbuffer = *cp++;
1855 delta = (inputbuffer >> 4) & 0xf;
1856 }
1857
1858 bufferstep = !bufferstep;
1859
1860 /* Step 2 - Find new index value (for later) */
1861 index += indexTable[delta];
1862 if ( index < 0 ) index = 0;
1863 if ( index > 88 ) index = 88;
1864
1865 /* Step 3 - Separate sign and magnitude */
1866 sign = delta & 8;
1867 delta = delta & 7;
1868
1869 /* Step 4 - Compute difference and new predicted value */
1870 /*
1871 ** Computes 'vpdiff = (delta+0.5)*step/4', but see comment
1872 ** in adpcm_coder.
1873 */
1874 vpdiff = step >> 3;
1875 if ( delta & 4 ) vpdiff += step;
1876 if ( delta & 2 ) vpdiff += step>>1;
1877 if ( delta & 1 ) vpdiff += step>>2;
1878
1879 if ( sign )
1880 valpred -= vpdiff;
1881 else
1882 valpred += vpdiff;
1883
1884 /* Step 5 - clamp output value */
1885 if ( valpred > 32767 )
1886 valpred = 32767;
1887 else if ( valpred < -32768 )
1888 valpred = -32768;
1889
1890 /* Step 6 - Update step value */
1891 step = stepsizeTable[index];
1892
        /* Step 7 - Output value */
1894 SETSAMPLE32(width, ncp, i, valpred << 16);
1895 }
1896
1897 rv = Py_BuildValue("(O(ii))", str, valpred, index);
1898 Py_DECREF(str);
1899 return rv;
1900 }
1901
1902 #include "clinic/audioop.c.h"
1903
1904 static PyMethodDef audioop_methods[] = {
1905 AUDIOOP_MAX_METHODDEF
1906 AUDIOOP_MINMAX_METHODDEF
1907 AUDIOOP_AVG_METHODDEF
1908 AUDIOOP_MAXPP_METHODDEF
1909 AUDIOOP_AVGPP_METHODDEF
1910 AUDIOOP_RMS_METHODDEF
1911 AUDIOOP_FINDFIT_METHODDEF
1912 AUDIOOP_FINDMAX_METHODDEF
1913 AUDIOOP_FINDFACTOR_METHODDEF
1914 AUDIOOP_CROSS_METHODDEF
1915 AUDIOOP_MUL_METHODDEF
1916 AUDIOOP_ADD_METHODDEF
1917 AUDIOOP_BIAS_METHODDEF
1918 AUDIOOP_ULAW2LIN_METHODDEF
1919 AUDIOOP_LIN2ULAW_METHODDEF
1920 AUDIOOP_ALAW2LIN_METHODDEF
1921 AUDIOOP_LIN2ALAW_METHODDEF
1922 AUDIOOP_LIN2LIN_METHODDEF
1923 AUDIOOP_ADPCM2LIN_METHODDEF
1924 AUDIOOP_LIN2ADPCM_METHODDEF
1925 AUDIOOP_TOMONO_METHODDEF
1926 AUDIOOP_TOSTEREO_METHODDEF
1927 AUDIOOP_GETSAMPLE_METHODDEF
1928 AUDIOOP_REVERSE_METHODDEF
1929 AUDIOOP_BYTESWAP_METHODDEF
1930 AUDIOOP_RATECV_METHODDEF
1931 { 0, 0 }
1932 };
1933
1934 static int
audioop_traverse(PyObject *module, visitproc visit, void *arg)
1936 {
1937 audioop_state *state = get_audioop_state(module);
1938 Py_VISIT(state->AudioopError);
1939 return 0;
1940 }
1941
1942 static int
audioop_clear(PyObject *module)
1944 {
1945 audioop_state *state = get_audioop_state(module);
1946 Py_CLEAR(state->AudioopError);
1947 return 0;
1948 }
1949
1950 static void
audioop_free(void *module) {
1952 audioop_clear((PyObject *)module);
1953 }
1954
1955 static int
audioop_exec(PyObject* module)
1957 {
1958 audioop_state *state = get_audioop_state(module);
1959
1960 state->AudioopError = PyErr_NewException("audioop.error", NULL, NULL);
1961 if (state->AudioopError == NULL) {
1962 return -1;
1963 }
1964
1965 Py_INCREF(state->AudioopError);
1966 if (PyModule_AddObject(module, "error", state->AudioopError) < 0) {
1967 Py_DECREF(state->AudioopError);
1968 return -1;
1969 }
1970
1971 return 0;
1972 }
1973
1974 static PyModuleDef_Slot audioop_slots[] = {
1975 {Py_mod_exec, audioop_exec},
1976 {0, NULL}
1977 };
1978
1979 static struct PyModuleDef audioopmodule = {
1980 PyModuleDef_HEAD_INIT,
1981 "audioop",
1982 NULL,
1983 sizeof(audioop_state),
1984 audioop_methods,
1985 audioop_slots,
1986 audioop_traverse,
1987 audioop_clear,
1988 audioop_free
1989 };
1990
1991 PyMODINIT_FUNC
PyInit_audioop(void)
1993 {
1994 if (PyErr_WarnEx(PyExc_DeprecationWarning,
1995 "'audioop' is deprecated and slated for removal in "
1996 "Python 3.13",
1997 7)) {
1998 return NULL;
1999 }
2000
2001 return PyModuleDef_Init(&audioopmodule);
2002 }
2003