Chromium Code Reviews

Index: media/base/sinc_resampler.cc
diff --git a/media/base/sinc_resampler.cc b/media/base/sinc_resampler.cc
index 00f9314c61078b5762b59a86cb8a8f2bdf2bf0a1..e504a24f92c3765687918476537d9ded93c1e6a6 100644
--- a/media/base/sinc_resampler.cc
+++ b/media/base/sinc_resampler.cc
@@ -55,6 +55,10 @@ SincResampler::SincResampler(double io_sample_rate_ratio, const ReadCB& read_cb)
       // Create input buffers with a 16-byte alignment for SSE optimizations.
       kernel_storage_(static_cast<float*>(
          base::AlignedAlloc(sizeof(float) * kKernelStorageSize, 16))),
+      kernel_pre_sinc_storage_(static_cast<float*>(
+         base::AlignedAlloc(sizeof(float) * kKernelStorageSize, 16))),
+      kernel_window_storage_(static_cast<float*>(
+         base::AlignedAlloc(sizeof(float) * kKernelStorageSize, 16))),
       input_buffer_(static_cast<float*>(
          base::AlignedAlloc(sizeof(float) * kBufferSize, 16))),
 #if defined(ARCH_CPU_X86_FAMILY) && !defined(__SSE__)
@@ -89,14 +93,18 @@ SincResampler::SincResampler(double io_sample_rate_ratio, const ReadCB& read_cb)
   memset(kernel_storage_.get(), 0,
          sizeof(*kernel_storage_.get()) * kKernelStorageSize);
+  memset(kernel_pre_sinc_storage_.get(), 0,
+         sizeof(*kernel_pre_sinc_storage_.get()) * kKernelStorageSize);
+  memset(kernel_window_storage_.get(), 0,
+         sizeof(*kernel_window_storage_.get()) * kKernelStorageSize);
   memset(input_buffer_.get(), 0, sizeof(*input_buffer_.get()) * kBufferSize);
 
-  InitializeKernel();
+  InitializeKernel(true);
 }
 
 SincResampler::~SincResampler() {}
 
-void SincResampler::InitializeKernel() {
+void SincResampler::InitializeKernel(bool first_run) {
   // Blackman window parameters.
   static const double kAlpha = 0.16;
   static const double kA0 = 0.5 * (1.0 - kAlpha);
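[Reviewer's note, not part of the CL: kAlpha and kA0 above are the standard approximate Blackman window coefficients. Assuming the usual companion values kA1 = 0.5 and kA2 = 0.5 * kAlpha, which sit just past this hunk's context, the window evaluated later in this function is

$$w(x) = a_0 - a_1 \cos(2\pi x) + a_2 \cos(4\pi x), \qquad a_0 = \tfrac{1 - \alpha}{2},\ a_1 = \tfrac{1}{2},\ a_2 = \tfrac{\alpha}{2},\ \alpha = 0.16,$$

with $x = (i - \text{subsample\_offset}) / \text{kKernelSize}$. Note that the window does not depend on the sample rate ratio; only the sinc term does, which is what the caching below exploits.]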
@@ -116,6 +124,18 @@ void SincResampler::InitializeKernel() {
   // depending on kKernelSize.
   sinc_scale_factor *= 0.9;
 
+  if (!first_run) {
    Chris Rogers (2013/04/08 19:38:11):
        A simple comment explaining the optimization we're …

    DaleCurtis (2013/04/15 20:27:31):
        Done.
+    for (int offset_idx = 0; offset_idx <= kKernelOffsetCount; ++offset_idx) {
+      for (int i = 0; i < kKernelSize; ++i) {
+        const int idx = i + offset_idx * kKernelSize;
+        const double s = sinc_scale_factor * kernel_pre_sinc_storage_[idx];
+        const double sinc = (s == 0 ? 1.0 : sin(s) / s) * sinc_scale_factor;
+        kernel_storage_[idx] = sinc * kernel_window_storage_[idx];
+      }
+    }
+    return;
+  }
+
   // Generates a set of windowed sinc() kernels.
   // We generate a range of sub-sample offsets from 0.0 to 1.0.
   for (int offset_idx = 0; offset_idx <= kKernelOffsetCount; ++offset_idx) {
@@ -123,22 +143,32 @@ void SincResampler::InitializeKernel() {
         static_cast<double>(offset_idx) / kKernelOffsetCount;
 
     for (int i = 0; i < kKernelSize; ++i) {
+      double pre_sinc = M_PI * (i - kKernelSize / 2 - subsample_offset);
+
       // Compute the sinc with offset.
-      double s =
-          sinc_scale_factor * M_PI * (i - kKernelSize / 2 - subsample_offset);
-      double sinc = (!s ? 1.0 : sin(s) / s) * sinc_scale_factor;
+      double s = sinc_scale_factor * pre_sinc;
+      double sinc = (s == 0 ? 1.0 : sin(s) / s) * sinc_scale_factor;
 
       // Compute Blackman window, matching the offset of the sinc().
       double x = (i - subsample_offset) / kKernelSize;
       double window = kA0 - kA1 * cos(2.0 * M_PI * x) + kA2
           * cos(4.0 * M_PI * x);
 
+      const int idx = i + offset_idx * kKernelSize;
+
       // Window the sinc() function and store at the correct offset.
-      kernel_storage_.get()[i + offset_idx * kKernelSize] = sinc * window;
+      kernel_pre_sinc_storage_[idx] = pre_sinc;
+      kernel_window_storage_[idx] = window;
+      kernel_storage_[idx] = sinc * window;
     }
   }
 }
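[Reviewer's note, not part of the CL: the optimization discussed in the thread above works because only sinc_scale_factor depends on io_sample_rate_ratio; the pre-sinc argument M_PI * (i - kKernelSize / 2 - subsample_offset) and the Blackman window depend only on the tap index and sub-sample offset, so they are computed once and reused. A minimal standalone sketch of that split, using illustrative constants and globals rather than the real SincResampler members:

// Standalone illustration of the first_run / rebuild split in the CL above.
// Constants and names are illustrative, not the real SincResampler ones.
#include <cmath>
#include <cstdio>
#include <vector>

namespace {

const double kPi = 3.14159265358979323846;
const int kKernelSize = 32;
const int kKernelOffsetCount = 32;
const int kKernelStorageSize = kKernelSize * (kKernelOffsetCount + 1);

// Ratio-independent tables, filled once (the expensive cos() calls live here).
std::vector<double> pre_sinc(kKernelStorageSize);
std::vector<double> window(kKernelStorageSize);
std::vector<double> kernel(kKernelStorageSize);

void BuildRatioIndependentTables() {
  const double kAlpha = 0.16;
  const double kA0 = 0.5 * (1.0 - kAlpha);
  const double kA1 = 0.5;
  const double kA2 = 0.5 * kAlpha;
  for (int offset_idx = 0; offset_idx <= kKernelOffsetCount; ++offset_idx) {
    const double offset = static_cast<double>(offset_idx) / kKernelOffsetCount;
    for (int i = 0; i < kKernelSize; ++i) {
      const int idx = i + offset_idx * kKernelSize;
      pre_sinc[idx] = kPi * (i - kKernelSize / 2 - offset);
      const double x = (i - offset) / kKernelSize;
      window[idx] =
          kA0 - kA1 * std::cos(2.0 * kPi * x) + kA2 * std::cos(4.0 * kPi * x);
    }
  }
}

// The only work needed when the sample rate ratio (and hence the sinc scale
// factor) changes: one sin() per tap, no cos() calls and no window rebuild.
void RebuildKernel(double sinc_scale_factor) {
  for (int idx = 0; idx < kKernelStorageSize; ++idx) {
    const double s = sinc_scale_factor * pre_sinc[idx];
    const double sinc =
        (s == 0 ? 1.0 : std::sin(s) / s) * sinc_scale_factor;
    kernel[idx] = sinc * window[idx];
  }
}

}  // namespace

int main() {
  BuildRatioIndependentTables();
  RebuildKernel(0.9);        // First ratio (scale factor is at most 0.9 here).
  RebuildKernel(0.9 * 0.5);  // Ratio changed: the tables are reused as-is.
  std::printf("kernel[0] = %f\n", kernel[0]);
  return 0;
}
]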
+void SincResampler::UpdateSampleRateRatio(double io_sample_rate_ratio) {
+  io_sample_rate_ratio_ = io_sample_rate_ratio;
+  InitializeKernel(false);
+}
+
 // If we know the minimum architecture avoid function hopping for CPU detection.
 #if defined(ARCH_CPU_X86_FAMILY)
 #if defined(__SSE__)
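[Reviewer's note, not part of the CL: a hypothetical caller-side sketch of the new method. The constructor and UpdateSampleRateRatio() signatures come from the diff above; the ReadCB signature is an assumption and should be checked against media/base/sinc_resampler.h.

// Hypothetical usage sketch, not part of this CL.
#include "base/bind.h"
#include "media/base/sinc_resampler.h"

namespace {

// Assumed ReadCB shape: fills |destination| with |frames| frames of input.
void ProvideInput(float* destination, int frames) {
  for (int i = 0; i < frames; ++i)
    destination[i] = 0.0f;  // Silence; a real caller reads from its source.
}

}  // namespace

void ResampleWithRatioChange() {
  // 44.1 kHz input resampled to 48 kHz output.
  media::SincResampler resampler(44100.0 / 48000.0, base::Bind(&ProvideInput));

  // ... call resampler.Resample() as usual ...

  // When only the rate changes (e.g. the input switches to 22.05 kHz), the new
  // method rebuilds the kernels from the cached pre-sinc and window tables
  // instead of requiring construction of a brand-new SincResampler.
  resampler.UpdateSampleRateRatio(22050.0 / 48000.0);
}
]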