| OLD | NEW |
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 // | 4 // |
| 5 // Input buffer layout, dividing the total buffer into regions (r0_ - r5_): | 5 // Initial input buffer layout, dividing into regions r0_ to r4_ (note: r0_, r3_ |
| 6 // and r4_ will move after the first load): |
| 6 // | 7 // |
| 7 // |----------------|-----------------------------------------|----------------| | 8 // |----------------|-----------------------------------------|----------------| |
| 8 // | 9 // |
| 9 // kBlockSize + kKernelSize / 2 | 10 // request_frames_ |
| 10 // <---------------------------------------------------------> | 11 // <---------------------------------------------------------> |
| 11 // r0_ | 12 // r0_ (during first load) |
| 12 // | 13 // |
| 13 // kKernelSize / 2 kKernelSize / 2 kKernelSize / 2 kKernelSize / 2 | 14 // kKernelSize / 2 kKernelSize / 2 kKernelSize / 2 kKernelSize / 2 |
| 14 // <---------------> <---------------> <---------------> <---------------> | 15 // <---------------> <---------------> <---------------> <---------------> |
| 15 // r1_ r2_ r3_ r4_ | 16 // r1_ r2_ r3_ r4_ |
| 16 // | 17 // |
| 17 // kBlockSize | 18 // block_size_ == r4_ - r2_ |
| 18 // <---------------------------------------> | 19 // <---------------------------------------> |
| 19 // r5_ | 20 // |
| 21 // request_frames_ |
| 22 // <------------------ ... -----------------> |
| 23 // r0_ (during second load) |
| 24 // |
| 25 // On the second request r0_ slides to the right by kKernelSize / 2 and r3_, r4_ |
| 26 // and block_size_ are reinitialized via step (3) in the algorithm below. |
| 27 // |
| 28 // These new regions remain constant until a Flush() occurs. While |
| 29 // complicated, this scheme lets us reduce jitter by always requesting the |
| 30 // same number of frames from the provided callback. |
| 20 // | 31 // |
| 21 // The algorithm: | 32 // The algorithm: |
| 22 // | 33 // |
| 23 // 1) Consume input frames into r0_ (r1_ is zero-initialized). | 34 // 1) Allocate input_buffer of size: request_frames_ + kKernelSize; this ensures |
| 24 // 2) Position kernel centered at start of r0_ (r2_) and generate output frames | 35 // there's enough room to read request_frames_ from the callback into region |
| 25 // until kernel is centered at start of r4_ or we've finished generating all | 36 // r0_ (which will move between the first and subsequent passes). |
| 26 // the output frames. | 37 // |
| 27 // 3) Copy r3_ to r1_ and r4_ to r2_. | 38 // 2) Let r1_, r2_ each represent half the kernel centered around r0_: |
| 28 // 4) Consume input frames into r5_ (zero-pad if we run out of input). | 39 // |
| 29 // 5) Goto (2) until all of input is consumed. | 40 // r0_ = input_buffer_ + kKernelSize / 2 |
| 41 // r1_ = input_buffer_ |
| 42 // r2_ = r0_ |
| 43 // |
| 44 // r0_ is always request_frames_ in size. r1_, r2_ are kKernelSize / 2 in |
| 45 // size. r1_ must be zero-initialized to avoid convolution with garbage (see |
| 46 // step (5) for why). |
| 47 // |
| 48 // 3) Let r3_, r4_ each represent half the kernel right aligned with the end of |
| 49 // r0_ and choose block_size_ as the distance in frames between r4_ and r2_: |
| 50 // |
| 51 // r3_ = r0_ + request_frames_ - kKernelSize |
| 52 // r4_ = r0_ + request_frames_ - kKernelSize / 2 |
| 53 // block_size_ = r4_ - r2_ = request_frames_ - kKernelSize / 2 |
| 54 // |
| 55 // 4) Consume request_frames_ frames into r0_. |
| 56 // |
| 57 // 5) Position kernel centered at start of r2_ and generate output frames until |
| 58 // the kernel is centered at the start of r4_ or we've finished generating |
| 59 // all the output frames. |
| 60 // |
| 61 // 6) Wrap leftover data from r3_ to r1_ and r4_ to r2_. |
| 62 // |
| 63 // 7) If we're on the second load, in order to avoid overwriting the frames we |
| 64 // just wrapped from r4_ we need to slide r0_ to the right by the size of |
| 65 // r4_, which is kKernelSize / 2: |
| 66 // |
| 67 // r0_ = r0_ + kKernelSize / 2 = input_buffer_ + kKernelSize |
| 68 // |
| 69 // r3_, r4_, and block_size_ then need to be reinitialized, so goto (3). |
| 70 // |
| 71 // 8) Else, goto (4). |
| 30 // | 72 // |
| 31 // Note: we're glossing over how the sub-sample handling works with | 73 // Note: we're glossing over how the sub-sample handling works with |
| 32 // |virtual_source_idx_|, etc. | 74 // |virtual_source_idx_|, etc. |
| 33 | 75 |
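To make the new layout concrete, here is a short worked example of the region arithmetic in steps (2), (3) and (7). The constants are illustrative assumptions (kKernelSize = 32, request_frames_ = 512), not values taken from this CL:

    #include <cassert>
    #include <cstddef>

    int main() {
      const size_t kKernelSize = 32;      // assumed for illustration
      const size_t request_frames = 512;  // assumed for illustration
      float input_buffer[512 + 32];       // request_frames + kKernelSize

      // Step (2): r1_, r2_ are each half a kernel at the front of the buffer.
      float* r0 = input_buffer + kKernelSize / 2;  // offset 16
      float* r1 = input_buffer;                    // offset 0
      float* r2 = r0;                              // offset 16

      // Step (3): r3_, r4_ right-aligned with the end of r0_.
      float* r3 = r0 + request_frames - kKernelSize;      // offset 496
      float* r4 = r0 + request_frames - kKernelSize / 2;  // offset 512
      size_t block_size = r4 - r2;                        // 496
      assert(block_size == request_frames - kKernelSize / 2);

      // Step (7): on the second load r0_ slides right by kKernelSize / 2 and
      // r3_, r4_, block_size_ are recomputed via step (3).
      r0 = input_buffer + kKernelSize;                    // offset 32
      r3 = r0 + request_frames - kKernelSize;             // offset 512
      r4 = r0 + request_frames - kKernelSize / 2;         // offset 528
      block_size = r4 - r2;                               // 512 == request_frames
      assert(r4 + kKernelSize / 2 == input_buffer + 512 + 32);
      return 0;
    }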
| 34 // MSVC++ requires this to be set before any other includes to get M_PI. | 76 // MSVC++ requires this to be set before any other includes to get M_PI. |
| 35 #define _USE_MATH_DEFINES | 77 #define _USE_MATH_DEFINES |
| 36 | 78 |
| 37 #include "media/base/sinc_resampler.h" | 79 #include "media/base/sinc_resampler.h" |
| 38 | 80 |
| 39 #include <cmath> | 81 #include <cmath> |
| (...skipping 17 matching lines...) |
| 57 // windowing it the transition from pass to stop does not happen right away. | 99 // windowing it the transition from pass to stop does not happen right away. |
| 58 // So we should adjust the low pass filter cutoff slightly downward to avoid | 100 // So we should adjust the low pass filter cutoff slightly downward to avoid |
| 59 // some aliasing at the very high-end. | 101 // some aliasing at the very high-end. |
| 60 // TODO(crogers): this value is empirical and to be more exact should vary | 102 // TODO(crogers): this value is empirical and to be more exact should vary |
| 61 // depending on kKernelSize. | 103 // depending on kKernelSize. |
| 62 sinc_scale_factor *= 0.9; | 104 sinc_scale_factor *= 0.9; |
| 63 | 105 |
| 64 return sinc_scale_factor; | 106 return sinc_scale_factor; |
| 65 } | 107 } |
| 66 | 108 |
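Only the tail of SincScaleFactor() appears in this diff. A hedged sketch of the whole function, under the assumption (not visible here) that the elided lines clamp the cutoff to 1/io_ratio when downsampling:

    // Hedged sketch of SincScaleFactor(); the 1/io_ratio clamp is an
    // assumption about the elided lines, only the 0.9 derating is shown above.
    double SincScaleFactorSketch(double io_ratio) {
      double sinc_scale_factor = io_ratio > 1.0 ? 1.0 / io_ratio : 1.0;
      // Visible above: pull the cutoff slightly below Nyquist to curb aliasing.
      sinc_scale_factor *= 0.9;
      return sinc_scale_factor;
    }

For example, downsampling by 2x (io_ratio = 2.0) would yield 0.5 * 0.9 = 0.45, i.e. a cutoff at 45% of the input Nyquist frequency.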
| 67 SincResampler::SincResampler(double io_sample_rate_ratio, const ReadCB& read_cb) | 109 SincResampler::SincResampler(double io_sample_rate_ratio, |
| 110 size_t request_frames, |
| 111 const ReadCB& read_cb) |
| 68 : io_sample_rate_ratio_(io_sample_rate_ratio), | 112 : io_sample_rate_ratio_(io_sample_rate_ratio), |
| 69 virtual_source_idx_(0), | |
| 70 buffer_primed_(false), | |
| 71 read_cb_(read_cb), | 113 read_cb_(read_cb), |
| 114 request_frames_(request_frames), |
| 115 input_buffer_size_(request_frames_ + kKernelSize), |
| 72 // Create input buffers with a 16-byte alignment for SSE optimizations. | 116 // Create input buffers with a 16-byte alignment for SSE optimizations. |
| 73 kernel_storage_(static_cast<float*>( | 117 kernel_storage_(static_cast<float*>( |
| 74 base::AlignedAlloc(sizeof(float) * kKernelStorageSize, 16))), | 118 base::AlignedAlloc(sizeof(float) * kKernelStorageSize, 16))), |
| 75 kernel_pre_sinc_storage_(static_cast<float*>( | 119 kernel_pre_sinc_storage_(static_cast<float*>( |
| 76 base::AlignedAlloc(sizeof(float) * kKernelStorageSize, 16))), | 120 base::AlignedAlloc(sizeof(float) * kKernelStorageSize, 16))), |
| 77 kernel_window_storage_(static_cast<float*>( | 121 kernel_window_storage_(static_cast<float*>( |
| 78 base::AlignedAlloc(sizeof(float) * kKernelStorageSize, 16))), | 122 base::AlignedAlloc(sizeof(float) * kKernelStorageSize, 16))), |
| 79 input_buffer_(static_cast<float*>( | 123 input_buffer_(static_cast<float*>( |
| 80 base::AlignedAlloc(sizeof(float) * kBufferSize, 16))), | 124 base::AlignedAlloc(sizeof(float) * input_buffer_size_, 16))), |
| 81 #if defined(ARCH_CPU_X86_FAMILY) && !defined(__SSE__) | 125 #if defined(ARCH_CPU_X86_FAMILY) && !defined(__SSE__) |
| 82 convolve_proc_(base::CPU().has_sse() ? Convolve_SSE : Convolve_C), | 126 convolve_proc_(base::CPU().has_sse() ? Convolve_SSE : Convolve_C), |
| 83 #endif | 127 #endif |
| 84 // Setup various region pointers in the buffer (see diagram above). | |
| 85 r0_(input_buffer_.get() + kKernelSize / 2), | |
| 86 r1_(input_buffer_.get()), | 128 r1_(input_buffer_.get()), |
| 87 r2_(r0_), | 129 r2_(input_buffer_.get() + kKernelSize / 2) { |
| 88 r3_(r0_ + kBlockSize - kKernelSize / 2), | 130 Flush(); |
| 89 r4_(r0_ + kBlockSize), | 131 CHECK_GT(block_size_, static_cast<size_t>(kKernelSize)) |
| 90 r5_(r0_ + kKernelSize / 2) { | 132 << "block_size must be greater than kKernelSize!"; |
| 91 // Ensure kKernelSize is a multiple of 32 for easy SSE optimizations; causes | |
| 92 // r0_ and r5_ (used for input) to always be 16-byte aligned by virtue of | |
| 93 // input_buffer_ being 16-byte aligned. | |
| 94 DCHECK_EQ(kKernelSize % 32, 0) << "kKernelSize must be a multiple of 32!"; | |
| 95 DCHECK_GT(kBlockSize, kKernelSize) | |
| 96 << "kBlockSize must be greater than kKernelSize!"; | |
| 97 // Basic sanity checks to ensure buffer regions are laid out correctly: | |
| 98 // r0_ and r2_ should always be the same position. | |
| 99 DCHECK_EQ(r0_, r2_); | |
| 100 // r1_ at the beginning of the buffer. | |
| 101 DCHECK_EQ(r1_, input_buffer_.get()); | |
| 102 // r1_ left of r2_, r2_ left of r5_ and r1_, r2_ size correct. | |
| 103 DCHECK_EQ(r2_ - r1_, r5_ - r2_); | |
| 104 // r3_ left of r4_, r5_ left of r0_ and r3_ size correct. | |
| 105 DCHECK_EQ(r4_ - r3_, r5_ - r0_); | |
| 106 // r3_, r4_ size correct and r4_ at the end of the buffer. | |
| 107 DCHECK_EQ(r4_ + (r4_ - r3_), r1_ + kBufferSize); | |
| 108 // r5_ size correct and at the end of the buffer. | |
| 109 DCHECK_EQ(r5_ + kBlockSize, r1_ + kBufferSize); | |
| 110 | 133 |
| 111 memset(kernel_storage_.get(), 0, | 134 memset(kernel_storage_.get(), 0, |
| 112 sizeof(*kernel_storage_.get()) * kKernelStorageSize); | 135 sizeof(*kernel_storage_.get()) * kKernelStorageSize); |
| 113 memset(kernel_pre_sinc_storage_.get(), 0, | 136 memset(kernel_pre_sinc_storage_.get(), 0, |
| 114 sizeof(*kernel_pre_sinc_storage_.get()) * kKernelStorageSize); | 137 sizeof(*kernel_pre_sinc_storage_.get()) * kKernelStorageSize); |
| 115 memset(kernel_window_storage_.get(), 0, | 138 memset(kernel_window_storage_.get(), 0, |
| 116 sizeof(*kernel_window_storage_.get()) * kKernelStorageSize); | 139 sizeof(*kernel_window_storage_.get()) * kKernelStorageSize); |
| 117 memset(input_buffer_.get(), 0, sizeof(*input_buffer_.get()) * kBufferSize); | |
| 118 | 140 |
| 119 InitializeKernel(); | 141 InitializeKernel(); |
| 120 } | 142 } |
| 121 | 143 |
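With the constructor now taking request_frames and the reordered ReadCB, a minimal usage sketch, assuming ReadCB is a base::Callback<void(int, float*)> as implied by read_cb_.Run(request_frames_, r0_) below (the callback body, rates, and frame counts are illustrative):

    #include "base/bind.h"
    #include "media/base/sinc_resampler.h"

    static void ProvideInput(int frames, float* destination) {
      // Fill |destination| with |frames| frames; zero-pad once input runs out.
      for (int i = 0; i < frames; ++i)
        destination[i] = 0.0f;
    }

    void ResampleExample() {
      // 48 kHz -> 44.1 kHz, requesting 512 input frames per callback.
      media::SincResampler resampler(
          48000.0 / 44100.0, 512, base::Bind(&ProvideInput));
      float output[256];
      resampler.Resample(256, output);
    }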
| 122 SincResampler::~SincResampler() {} | 144 SincResampler::~SincResampler() {} |
| 123 | 145 |
| 146 void SincResampler::UpdateRegions(bool second_load) { |
| 147 // Setup various region pointers in the buffer (see diagram above). If we're |
| 148 // on the second load we need to slide r0_ to the right by kKernelSize / 2. |
| 149 r0_ = input_buffer_.get() + (second_load ? kKernelSize : kKernelSize / 2); |
| 150 r3_ = r0_ + request_frames_ - kKernelSize; |
| 151 r4_ = r0_ + request_frames_ - kKernelSize / 2; |
| 152 block_size_ = r4_ - r2_; |
| 153 |
| 154 // r1_ at the beginning of the buffer. |
| 155 CHECK_EQ(r1_, input_buffer_.get()); |
| 156 // r1_ left of r2_, r3_ left of r4_, and sizes correct. |
| 157 CHECK_EQ(r2_ - r1_, r4_ - r3_); |
| 158 // r2_ left of r3_. |
| 159 CHECK_LT(r2_, r3_); |
| 160 } |
| 161 |
| 124 void SincResampler::InitializeKernel() { | 162 void SincResampler::InitializeKernel() { |
| 125 // Blackman window parameters. | 163 // Blackman window parameters. |
| 126 static const double kAlpha = 0.16; | 164 static const double kAlpha = 0.16; |
| 127 static const double kA0 = 0.5 * (1.0 - kAlpha); | 165 static const double kA0 = 0.5 * (1.0 - kAlpha); |
| 128 static const double kA1 = 0.5; | 166 static const double kA1 = 0.5; |
| 129 static const double kA2 = 0.5 * kAlpha; | 167 static const double kA2 = 0.5 * kAlpha; |
| 130 | 168 |
| 131 // Generates a set of windowed sinc() kernels. | 169 // Generates a set of windowed sinc() kernels. |
| 132 // We generate a range of sub-sample offsets from 0.0 to 1.0. | 170 // We generate a range of sub-sample offsets from 0.0 to 1.0. |
| 133 const double sinc_scale_factor = SincScaleFactor(io_sample_rate_ratio_); | 171 const double sinc_scale_factor = SincScaleFactor(io_sample_rate_ratio_); |
| (...skipping 60 matching lines...) |
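The body of InitializeKernel() is elided above, but kAlpha/kA0/kA1/kA2 are the standard Blackman window coefficients. The window itself (a well-known formula, not copied from the elided code; the normalization by kernel_size - 1 is illustrative):

    #include <cmath>

    // Standard Blackman window built from the constants above, evaluated at
    // tap n of a kernel_size-tap kernel.
    double BlackmanWindow(int n, int kernel_size) {
      static const double kAlpha = 0.16;
      static const double kA0 = 0.5 * (1.0 - kAlpha);
      static const double kA1 = 0.5;
      static const double kA2 = 0.5 * kAlpha;
      const double x = static_cast<double>(n) / (kernel_size - 1);
      return kA0 - kA1 * cos(2.0 * M_PI * x) + kA2 * cos(4.0 * M_PI * x);
    }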
| 194 // TODO(dalecurtis): Once Chrome moves to a SSE baseline this can be removed. | 232 // TODO(dalecurtis): Once Chrome moves to a SSE baseline this can be removed. |
| 195 #define CONVOLVE_FUNC convolve_proc_ | 233 #define CONVOLVE_FUNC convolve_proc_ |
| 196 #endif | 234 #endif |
| 197 #elif defined(ARCH_CPU_ARM_FAMILY) && defined(USE_NEON) | 235 #elif defined(ARCH_CPU_ARM_FAMILY) && defined(USE_NEON) |
| 198 #define CONVOLVE_FUNC Convolve_NEON | 236 #define CONVOLVE_FUNC Convolve_NEON |
| 199 #else | 237 #else |
| 200 // Unknown architecture. | 238 // Unknown architecture. |
| 201 #define CONVOLVE_FUNC Convolve_C | 239 #define CONVOLVE_FUNC Convolve_C |
| 202 #endif | 240 #endif |
| 203 | 241 |
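On x86 builds without a guaranteed SSE baseline, CONVOLVE_FUNC resolves to the convolve_proc_ member selected once in the constructor. Reduced to a sketch, the pattern is:

    // Runtime dispatch: pick the convolution routine once from CPU features,
    // then call through a function pointer on the hot path.
    typedef float (*ConvolveProc)(const float* input_ptr, const float* k1,
                                  const float* k2,
                                  double kernel_interpolation_factor);

    float Convolve_C(const float*, const float*, const float*, double);
    float Convolve_SSE(const float*, const float*, const float*, double);

    // Mirrors convolve_proc_(base::CPU().has_sse() ? Convolve_SSE : Convolve_C).
    ConvolveProc ChooseConvolve(bool has_sse) {
      return has_sse ? Convolve_SSE : Convolve_C;
    }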
| 204 void SincResampler::Resample(float* destination, int frames) { | 242 void SincResampler::Resample(int frames, float* destination) { |
| 205 int remaining_frames = frames; | 243 int remaining_frames = frames; |
| 206 | 244 |
| 207 // Step (1) -- Prime the input buffer at the start of the input stream. | 245 // Step (1) -- Prime the input buffer at the start of the input stream. |
| 208 if (!buffer_primed_) { | 246 if (!buffer_primed_) { |
| 209 read_cb_.Run(r0_, kBlockSize + kKernelSize / 2); | 247 read_cb_.Run(request_frames_, r0_); |
| 210 buffer_primed_ = true; | 248 buffer_primed_ = true; |
| 211 } | 249 } |
| 212 | 250 |
| 213 // Step (2) -- Resample! | 251 // Step (2) -- Resample! |
| 214 while (remaining_frames) { | 252 while (remaining_frames) { |
| 215 while (virtual_source_idx_ < kBlockSize) { | 253 while (virtual_source_idx_ < block_size_) { |
| 216 // |virtual_source_idx_| lies in between two kernel offsets so figure out | 254 // |virtual_source_idx_| lies in between two kernel offsets so figure out |
| 217 // what they are. | 255 // what they are. |
| 218 int source_idx = static_cast<int>(virtual_source_idx_); | 256 const int source_idx = virtual_source_idx_; |
| 219 double subsample_remainder = virtual_source_idx_ - source_idx; | 257 const double subsample_remainder = virtual_source_idx_ - source_idx; |
| 220 | 258 |
| 221 double virtual_offset_idx = subsample_remainder * kKernelOffsetCount; | 259 const double virtual_offset_idx = |
| 222 int offset_idx = static_cast<int>(virtual_offset_idx); | 260 subsample_remainder * kKernelOffsetCount; |
| 261 const int offset_idx = virtual_offset_idx; |
| 223 | 262 |
| 224 // We'll compute "convolutions" for the two kernels which straddle | 263 // We'll compute "convolutions" for the two kernels which straddle |
| 225 // |virtual_source_idx_|. | 264 // |virtual_source_idx_|. |
| 226 float* k1 = kernel_storage_.get() + offset_idx * kKernelSize; | 265 const float* k1 = kernel_storage_.get() + offset_idx * kKernelSize; |
| 227 float* k2 = k1 + kKernelSize; | 266 const float* k2 = k1 + kKernelSize; |
| 228 | 267 |
| 229 // Ensure |k1|, |k2| are 16-byte aligned for SIMD usage. Should always be | 268 // Ensure |k1|, |k2| are 16-byte aligned for SIMD usage. Should always be |
| 230 // true so long as kKernelSize is a multiple of 16. | 269 // true so long as kKernelSize is a multiple of 16. |
| 231 DCHECK_EQ(0u, reinterpret_cast<uintptr_t>(k1) & 0x0F); | 270 DCHECK_EQ(0u, reinterpret_cast<uintptr_t>(k1) & 0x0F); |
| 232 DCHECK_EQ(0u, reinterpret_cast<uintptr_t>(k2) & 0x0F); | 271 DCHECK_EQ(0u, reinterpret_cast<uintptr_t>(k2) & 0x0F); |
| 233 | 272 |
| 234 // Initialize input pointer based on quantized |virtual_source_idx_|. | 273 // Initialize input pointer based on quantized |virtual_source_idx_|. |
| 235 float* input_ptr = r1_ + source_idx; | 274 const float* input_ptr = r1_ + source_idx; |
| 236 | 275 |
| 237 // Figure out how much to weight each kernel's "convolution". | 276 // Figure out how much to weight each kernel's "convolution". |
| 238 double kernel_interpolation_factor = virtual_offset_idx - offset_idx; | 277 const double kernel_interpolation_factor = |
| 278 virtual_offset_idx - offset_idx; |
| 239 *destination++ = CONVOLVE_FUNC( | 279 *destination++ = CONVOLVE_FUNC( |
| 240 input_ptr, k1, k2, kernel_interpolation_factor); | 280 input_ptr, k1, k2, kernel_interpolation_factor); |
| 241 | 281 |
| 242 // Advance the virtual index. | 282 // Advance the virtual index. |
| 243 virtual_source_idx_ += io_sample_rate_ratio_; | 283 virtual_source_idx_ += io_sample_rate_ratio_; |
| 244 | 284 |
| 245 if (!--remaining_frames) | 285 if (!--remaining_frames) |
| 246 return; | 286 return; |
| 247 } | 287 } |
| 248 | 288 |
| 249 // Wrap back around to the start. | 289 // Wrap back around to the start. |
| 250 virtual_source_idx_ -= kBlockSize; | 290 virtual_source_idx_ -= block_size_; |
| 251 | 291 |
| 252 // Step (3) Copy r3_ to r1_ and r4_ to r2_. | 292 // Step (3) -- Copy r3_, r4_ to r1_, r2_. |
| 253 // This wraps the last input frames back to the start of the buffer. | 293 // This wraps the last input frames back to the start of the buffer. |
| 254 memcpy(r1_, r3_, sizeof(*input_buffer_.get()) * (kKernelSize / 2)); | 294 memcpy(r1_, r3_, sizeof(*input_buffer_.get()) * kKernelSize); |
| 255 memcpy(r2_, r4_, sizeof(*input_buffer_.get()) * (kKernelSize / 2)); | |
| 256 | 295 |
| 257 // Step (4) | 296 // Step (4) -- Reinitialize regions if necessary. |
| 258 // Refresh the buffer with more input. | 297 if (r0_ == r2_) |
| 259 read_cb_.Run(r5_, kBlockSize); | 298 UpdateRegions(true); |
| 299 |
| 300 // Step (5) -- Refresh the buffer with more input. |
| 301 read_cb_.Run(request_frames_, r0_); |
| 260 } | 302 } |
| 261 } | 303 } |
| 262 | 304 |
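A note on the single memcpy in step (3) above: because r1_ and r2_ sit back-to-back at the front of the buffer, and r3_ and r4_ sit back-to-back at its tail, the old pair of kKernelSize / 2 copies collapses into one contiguous kKernelSize copy. With the illustrative offsets from the earlier example:

    // r1_ = buf + 0,   r2_ = buf + 16  -> destination [0, 32) is contiguous.
    // r3_ = buf + 496, r4_ = buf + 512 -> source [496, 528) is contiguous.
    memcpy(r1_, r3_, sizeof(*input_buffer_.get()) * kKernelSize);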
| 263 #undef CONVOLVE_FUNC | 305 #undef CONVOLVE_FUNC |
| 264 | 306 |
| 265 int SincResampler::ChunkSize() const { | 307 int SincResampler::ChunkSize() const { |
| 266 return kBlockSize / io_sample_rate_ratio_; | 308 return block_size_ / io_sample_rate_ratio_; |
| 267 } | 309 } |
| 268 | 310 |
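ChunkSize() now depends on the runtime block_size_. A worked example with the same assumed values (request_frames_ = 512, kKernelSize = 32, 48 kHz to 44.1 kHz):

    // block_size_ = 512 - 32 / 2 = 496 on the first load;
    // io_sample_rate_ratio_ = 48000.0 / 44100.0 ~= 1.088;
    // ChunkSize() = 496 / 1.088 ~= 455 output frames per input block.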
| 269 void SincResampler::Flush() { | 311 void SincResampler::Flush() { |
| 270 virtual_source_idx_ = 0; | 312 virtual_source_idx_ = 0; |
| 271 buffer_primed_ = false; | 313 buffer_primed_ = false; |
| 272 memset(input_buffer_.get(), 0, sizeof(*input_buffer_.get()) * kBufferSize); | 314 memset(input_buffer_.get(), 0, |
| 315 sizeof(*input_buffer_.get()) * input_buffer_size_); |
| 316 UpdateRegions(false); |
| 273 } | 317 } |
| 274 | 318 |
| 275 float SincResampler::Convolve_C(const float* input_ptr, const float* k1, | 319 float SincResampler::Convolve_C(const float* input_ptr, const float* k1, |
| 276 const float* k2, | 320 const float* k2, |
| 277 double kernel_interpolation_factor) { | 321 double kernel_interpolation_factor) { |
| 278 float sum1 = 0; | 322 float sum1 = 0; |
| 279 float sum2 = 0; | 323 float sum2 = 0; |
| 280 | 324 |
| 281 // Generate a single output sample. Unrolling this loop hurt performance in | 325 // Generate a single output sample. Unrolling this loop hurt performance in |
| 282 // local testing. | 326 // local testing. |
| (...skipping 31 matching lines...) |
| 314 vmulq_f32(m_sums1, vmovq_n_f32(1.0 - kernel_interpolation_factor)), | 358 vmulq_f32(m_sums1, vmovq_n_f32(1.0 - kernel_interpolation_factor)), |
| 315 m_sums2, vmovq_n_f32(kernel_interpolation_factor)); | 359 m_sums2, vmovq_n_f32(kernel_interpolation_factor)); |
| 316 | 360 |
| 317 // Sum components together. | 361 // Sum components together. |
| 318 float32x2_t m_half = vadd_f32(vget_high_f32(m_sums1), vget_low_f32(m_sums1)); | 362 float32x2_t m_half = vadd_f32(vget_high_f32(m_sums1), vget_low_f32(m_sums1)); |
| 319 return vget_lane_f32(vpadd_f32(m_half, m_half), 0); | 363 return vget_lane_f32(vpadd_f32(m_half, m_half), 0); |
| 320 } | 364 } |
| 321 #endif | 365 #endif |
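All three Convolve_* variants compute the same quantity; in scalar form (the kernel length is written as a literal so the sketch is self-contained):

    // Two dot products against adjacent kernel offsets, linearly blended by
    // the sub-sample interpolation factor; mirrors Convolve_C above.
    float ConvolveSketch(const float* input_ptr, const float* k1,
                         const float* k2, double kernel_interpolation_factor) {
      float sum1 = 0;
      float sum2 = 0;
      for (int i = 0; i < 32 /* kKernelSize */; ++i) {
        sum1 += input_ptr[i] * k1[i];
        sum2 += input_ptr[i] * k2[i];
      }
      return static_cast<float>(
          (1.0 - kernel_interpolation_factor) * sum1 +
          kernel_interpolation_factor * sum2);
    }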
| 322 | 366 |
| 323 } // namespace media | 367 } // namespace media |