/*
 *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

// Modified from the Chromium original:
// src/media/base/sinc_resampler.cc

// Initial input buffer layout, dividing into regions r0_ to r4_ (note: r0_, r3_
// and r4_ will move after the first load):
//
// |----------------|-----------------------------------------|----------------|
//
//                                        request_frames_
//                   <--------------------------------------------------------->
//                                    r0_ (during first load)
//
//  kKernelSize / 2   kKernelSize / 2          kKernelSize / 2    kKernelSize / 2
// <---------------> <--------------->        <--------------->  <--------------->
//        r1_               r2_                      r3_                 r4_
//
//                              block_size_ == r4_ - r2_
//                   <--------------------------------------->
//
//                                                  request_frames_
//                                    <------------------ ... ----------------->
//                                          r0_ (during second load)
//
// On the second request r0_ slides to the right by kKernelSize / 2 and r3_, r4_
// and block_size_ are reinitialized via step (3) in the algorithm below.
//
// These new regions remain constant until a Flush() occurs. While complicated,
// this allows us to reduce jitter by always requesting the same amount from the
// provided callback.
//
// The algorithm:
//
// 1) Allocate input_buffer of size: request_frames_ + kKernelSize; this ensures
//    there's enough room to read request_frames_ from the callback into region
//    r0_ (which will move between the first and subsequent passes).
//
// 2) Let r1_, r2_ each represent half the kernel centered around r0_:
//
//        r0_ = input_buffer_ + kKernelSize / 2
//        r1_ = input_buffer_
//        r2_ = r0_
//
//    r0_ is always request_frames_ in size. r1_, r2_ are kKernelSize / 2 in
//    size. r1_ must be zero initialized to avoid convolution with garbage (see
//    step (5) for why).
//
// 3) Let r3_, r4_ each represent half the kernel right aligned with the end of
//    r0_ and choose block_size_ as the distance in frames between r4_ and r2_:
//
//        r3_ = r0_ + request_frames_ - kKernelSize
//        r4_ = r0_ + request_frames_ - kKernelSize / 2
//        block_size_ = r4_ - r2_ = request_frames_ - kKernelSize / 2
//
// 4) Consume request_frames_ frames into r0_.
//
// 5) Position kernel centered at start of r2_ and generate output frames until
//    the kernel is centered at the start of r4_ or we've finished generating
//    all the output frames.
//
// 6) Wrap leftover data from r3_ to r1_ and from r4_ to r2_.
//
// 7) If we're on the second load, in order to avoid overwriting the frames we
//    just wrapped from r4_ we need to slide r0_ to the right by the size of
//    r4_, which is kKernelSize / 2:
//
//        r0_ = r0_ + kKernelSize / 2 = input_buffer_ + kKernelSize
//
//    r3_, r4_, and block_size_ then need to be reinitialized, so goto (3).
//
// 8) Else, if we're not on the second load, goto (4).
//
// Note: we're glossing over how the sub-sample handling works with
// `virtual_source_idx_`, etc.
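//
// Example usage, shown here only as an illustrative sketch (the MyAudioSource
// name, the 48 kHz / 16 kHz rates, and the 512-frame request size are
// assumptions for illustration, not values taken from this file):
//
//   class MyAudioSource : public SincResamplerCallback {
//    public:
//     void Run(size_t frames, float* destination) override {
//       // Fill `destination` with `frames` frames of mono float audio.
//     }
//   };
//
//   MyAudioSource source;
//   // io_sample_rate_ratio = input rate / output rate, so 3.0 downsamples
//   // 48 kHz audio to 16 kHz; 512 frames are pulled from `source` per refill.
//   SincResampler resampler(48000.0 / 16000.0, 512, &source);
//   std::vector<float> output(resampler.ChunkSize());
//   resampler.Resample(output.size(), output.data());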

// MSVC++ requires this to be set before any other includes to get M_PI.
#define _USE_MATH_DEFINES

#include "common_audio/resampler/sinc_resampler.h"

#include <math.h>
#include <stdint.h>
#include <string.h>

#include <limits>

#include "rtc_base/checks.h"
#include "rtc_base/system/arch.h"
#include "system_wrappers/include/cpu_features_wrapper.h"  // kSSE2, WebRtc_G...

namespace webrtc {

namespace {

double SincScaleFactor(double io_ratio) {
  // `sinc_scale_factor` is basically the normalized cutoff frequency of the
  // low-pass filter.
  double sinc_scale_factor = io_ratio > 1.0 ? 1.0 / io_ratio : 1.0;

  // The sinc function is an idealized brick-wall filter, but since we're
  // windowing it the transition from pass to stop does not happen right away.
  // So we should adjust the low pass filter cutoff slightly downward to avoid
  // some aliasing at the very high-end.
  // TODO(crogers): this value is empirical and to be more exact should vary
  // depending on kKernelSize.
  sinc_scale_factor *= 0.9;
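  // For example, when downsampling from 48 kHz to 16 kHz (illustrative rates,
  // not taken from this file), io_ratio is 3.0 and the effective cutoff works
  // out to (1.0 / 3.0) * 0.9 = 0.3 of the input Nyquist frequency.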

  return sinc_scale_factor;
}

}  // namespace

const size_t SincResampler::kKernelSize;

// If we know the minimum architecture at compile time, avoid CPU detection.
void SincResampler::InitializeCPUSpecificFeatures() {
#if defined(WEBRTC_HAS_NEON)
  convolve_proc_ = Convolve_NEON;
#elif defined(WEBRTC_ARCH_X86_FAMILY)
  // Use AVX2 instead of SSE2 when AVX2 is supported.
  if (GetCPUInfo(kAVX2))
    convolve_proc_ = Convolve_AVX2;
  else if (GetCPUInfo(kSSE2))
    convolve_proc_ = Convolve_SSE;
  else
    convolve_proc_ = Convolve_C;
#else
  // Unknown architecture.
  convolve_proc_ = Convolve_C;
#endif
}

SincResampler::SincResampler(double io_sample_rate_ratio,
                             size_t request_frames,
                             SincResamplerCallback* read_cb)
    : io_sample_rate_ratio_(io_sample_rate_ratio),
      read_cb_(read_cb),
      request_frames_(request_frames),
      input_buffer_size_(request_frames_ + kKernelSize),
      // Create input buffers with a 32-byte alignment for SIMD optimizations.
      kernel_storage_(static_cast<float*>(
          AlignedMalloc(sizeof(float) * kKernelStorageSize, 32))),
      kernel_pre_sinc_storage_(static_cast<float*>(
          AlignedMalloc(sizeof(float) * kKernelStorageSize, 32))),
      kernel_window_storage_(static_cast<float*>(
          AlignedMalloc(sizeof(float) * kKernelStorageSize, 32))),
      input_buffer_(static_cast<float*>(
          AlignedMalloc(sizeof(float) * input_buffer_size_, 32))),
      convolve_proc_(nullptr),
      r1_(input_buffer_.get()),
      r2_(input_buffer_.get() + kKernelSize / 2) {
  InitializeCPUSpecificFeatures();
  RTC_DCHECK(convolve_proc_);
  RTC_DCHECK_GT(request_frames_, 0);
  Flush();
  RTC_DCHECK_GT(block_size_, kKernelSize);

  memset(kernel_storage_.get(), 0,
         sizeof(*kernel_storage_.get()) * kKernelStorageSize);
  memset(kernel_pre_sinc_storage_.get(), 0,
         sizeof(*kernel_pre_sinc_storage_.get()) * kKernelStorageSize);
  memset(kernel_window_storage_.get(), 0,
         sizeof(*kernel_window_storage_.get()) * kKernelStorageSize);

  InitializeKernel();
}

SincResampler::~SincResampler() {}

void SincResampler::UpdateRegions(bool second_load) {
  // Set up the various region pointers in the buffer (see diagram above). If
  // we're on the second load we need to slide r0_ to the right by
  // kKernelSize / 2.
  r0_ = input_buffer_.get() + (second_load ? kKernelSize : kKernelSize / 2);
  r3_ = r0_ + request_frames_ - kKernelSize;
  r4_ = r0_ + request_frames_ - kKernelSize / 2;
  block_size_ = r4_ - r2_;
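  // For reference, on the first load (second_load == false) these work out to:
  //   r0_ = input_buffer_ + kKernelSize / 2
  //   r3_ = input_buffer_ + request_frames_ - kKernelSize / 2
  //   r4_ = input_buffer_ + request_frames_
  //   block_size_ = request_frames_ - kKernelSize / 2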

  // r1_ at the beginning of the buffer.
  RTC_DCHECK_EQ(r1_, input_buffer_.get());
  // r1_ left of r2_, r3_ left of r4_, and both pairs span the same size.
  RTC_DCHECK_EQ(r2_ - r1_, r4_ - r3_);
  // r2_ left of r3_.
  RTC_DCHECK_LT(r2_, r3_);
}

void SincResampler::InitializeKernel() {
  // Blackman window parameters.
  static const double kAlpha = 0.16;
  static const double kA0 = 0.5 * (1.0 - kAlpha);
  static const double kA1 = 0.5;
  static const double kA2 = 0.5 * kAlpha;

  // Generates a set of windowed sinc() kernels.
  // We generate a range of sub-sample offsets from 0.0 to 1.0.
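  // Storage-wise, (kKernelOffsetCount + 1) kernels of kKernelSize floats each
  // are laid out back to back; the extra kernel at offset 1.0 is what lets
  // Resample() interpolate between kernel `offset_idx` and `offset_idx + 1`
  // without wrapping.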
  const double sinc_scale_factor = SincScaleFactor(io_sample_rate_ratio_);
  for (size_t offset_idx = 0; offset_idx <= kKernelOffsetCount; ++offset_idx) {
    const float subsample_offset =
        static_cast<float>(offset_idx) / kKernelOffsetCount;

    for (size_t i = 0; i < kKernelSize; ++i) {
      const size_t idx = i + offset_idx * kKernelSize;
      const float pre_sinc = static_cast<float>(
          M_PI * (static_cast<int>(i) - static_cast<int>(kKernelSize / 2) -
                  subsample_offset));
      kernel_pre_sinc_storage_[idx] = pre_sinc;

      // Compute Blackman window, matching the offset of the sinc().
      const float x = (i - subsample_offset) / kKernelSize;
      const float window = static_cast<float>(kA0 - kA1 * cos(2.0 * M_PI * x) +
                                              kA2 * cos(4.0 * M_PI * x));
      kernel_window_storage_[idx] = window;

      // Compute the sinc with offset, then window the sinc() function and store
      // at the correct offset.
      kernel_storage_[idx] = static_cast<float>(
          window * ((pre_sinc == 0)
                        ? sinc_scale_factor
                        : (sin(sinc_scale_factor * pre_sinc) / pre_sinc)));
    }
  }
}

void SincResampler::SetRatio(double io_sample_rate_ratio) {
  if (fabs(io_sample_rate_ratio_ - io_sample_rate_ratio) <
      std::numeric_limits<double>::epsilon()) {
    return;
  }

  io_sample_rate_ratio_ = io_sample_rate_ratio;

  // Optimize reinitialization by reusing values which are independent of
  // `sinc_scale_factor`. Provides a 3x speedup.
  const double sinc_scale_factor = SincScaleFactor(io_sample_rate_ratio_);
  for (size_t offset_idx = 0; offset_idx <= kKernelOffsetCount; ++offset_idx) {
    for (size_t i = 0; i < kKernelSize; ++i) {
      const size_t idx = i + offset_idx * kKernelSize;
      const float window = kernel_window_storage_[idx];
      const float pre_sinc = kernel_pre_sinc_storage_[idx];

      kernel_storage_[idx] = static_cast<float>(
          window * ((pre_sinc == 0)
                        ? sinc_scale_factor
                        : (sin(sinc_scale_factor * pre_sinc) / pre_sinc)));
    }
  }
}

void SincResampler::Resample(size_t frames, float* destination) {
  size_t remaining_frames = frames;

  // Step (1) -- Prime the input buffer at the start of the input stream.
  if (!buffer_primed_ && remaining_frames) {
    read_cb_->Run(request_frames_, r0_);
    buffer_primed_ = true;
  }

  // Step (2) -- Resample! const what we can outside of the loop for speed. It
  // actually has an impact on ARM performance. See inner loop comment below.
  const double current_io_ratio = io_sample_rate_ratio_;
  const float* const kernel_ptr = kernel_storage_.get();
  while (remaining_frames) {
    // `i` may be negative if the last Resample() call ended on an iteration
    // that put `virtual_source_idx_` over the limit.
    //
    // Note: The loop construct here can severely impact performance on ARM
    // or when built with clang. See https://codereview.chromium.org/18566009/
    for (int i = static_cast<int>(
             ceil((block_size_ - virtual_source_idx_) / current_io_ratio));
         i > 0; --i) {
      RTC_DCHECK_LT(virtual_source_idx_, block_size_);

      // `virtual_source_idx_` lies in between two kernel offsets so figure out
      // what they are.
      const int source_idx = static_cast<int>(virtual_source_idx_);
      const double subsample_remainder = virtual_source_idx_ - source_idx;

      const double virtual_offset_idx =
          subsample_remainder * kKernelOffsetCount;
      const int offset_idx = static_cast<int>(virtual_offset_idx);

      // We'll compute "convolutions" for the two kernels which straddle
      // `virtual_source_idx_`.
      const float* const k1 = kernel_ptr + offset_idx * kKernelSize;
      const float* const k2 = k1 + kKernelSize;

      // Ensure `k1`, `k2` are 32-byte aligned for SIMD usage. Should always be
      // true so long as kKernelSize is a multiple of 32.
      RTC_DCHECK_EQ(0, reinterpret_cast<uintptr_t>(k1) % 32);
      RTC_DCHECK_EQ(0, reinterpret_cast<uintptr_t>(k2) % 32);

      // Initialize input pointer based on quantized `virtual_source_idx_`.
      const float* const input_ptr = r1_ + source_idx;

      // Figure out how much to weight each kernel's "convolution".
      const double kernel_interpolation_factor =
          virtual_offset_idx - offset_idx;
      *destination++ =
          convolve_proc_(input_ptr, k1, k2, kernel_interpolation_factor);

      // Advance the virtual index.
      virtual_source_idx_ += current_io_ratio;

      if (!--remaining_frames)
        return;
    }

    // Wrap back around to the start.
    virtual_source_idx_ -= block_size_;

    // Step (3) -- Copy r3_, r4_ to r1_, r2_.
    // This wraps the last input frames back to the start of the buffer.
    memcpy(r1_, r3_, sizeof(*input_buffer_.get()) * kKernelSize);

    // Step (4) -- Reinitialize regions if necessary.
    if (r0_ == r2_)
      UpdateRegions(true);

    // Step (5) -- Refresh the buffer with more input.
    read_cb_->Run(request_frames_, r0_);
  }
}

#undef CONVOLVE_FUNC

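// Roughly speaking, ChunkSize() is the number of output frames that one
// block_size_ frames' worth of input yields at the current ratio. For example
// (illustrative numbers only), with block_size_ = 480 and
// io_sample_rate_ratio_ = 3.0 (48 kHz down to 16 kHz) it returns
// 480 / 3.0 = 160 frames.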
size_t SincResampler::ChunkSize() const {
  return static_cast<size_t>(block_size_ / io_sample_rate_ratio_);
}

void SincResampler::Flush() {
  virtual_source_idx_ = 0;
  buffer_primed_ = false;
  memset(input_buffer_.get(), 0,
         sizeof(*input_buffer_.get()) * input_buffer_size_);
  UpdateRegions(false);
}

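// Portable reference implementation of the kernel convolution: computes the
// dot products of `input_ptr` with `k1` and `k2` over kKernelSize samples and
// linearly blends the two results with `kernel_interpolation_factor`.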
float SincResampler::Convolve_C(const float* input_ptr,
                                const float* k1,
                                const float* k2,
                                double kernel_interpolation_factor) {
  float sum1 = 0;
  float sum2 = 0;

  // Generate a single output sample. Unrolling this loop hurt performance in
  // local testing.
  size_t n = kKernelSize;
  while (n--) {
    sum1 += *input_ptr * *k1++;
    sum2 += *input_ptr++ * *k2++;
  }

  // Linearly interpolate the two "convolutions".
  return static_cast<float>((1.0 - kernel_interpolation_factor) * sum1 +
                            kernel_interpolation_factor * sum2);
}

}  // namespace webrtc