Chromium Code Reviews

Side by Side Diff: media/base/sinc_resampler.cc

Issue 14189035: Reduce jitter from uneven SincResampler buffer size requests. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Cleanup. Created 7 years, 8 months ago
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 // 4 //
5 // Input buffer layout, dividing the total buffer into regions (r0_ - r5_): 5 // Initial input buffer layout, dividing into regions r0_ to r4_:
6 // 6 //
7 // |----------------|-----------------------------------------|----------------| 7 // |----------------|-----------------------------------------|----------------|
8 // 8 //
9 // kBlockSize + kKernelSize / 2 9 // 1st request: request_size_
10 // <---------------------------------------------------------> 10 // <--------------------------------------------------------->
11 // r0_ 11 // r0_
12 // 12 //
13 // block_size_ = request_size_ - kKernelSize / 2
14 // <--------------------------------------->
15 //
13 // kKernelSize / 2 kKernelSize / 2 kKernelSize / 2 kKernelSize / 2 16 // kKernelSize / 2 kKernelSize / 2 kKernelSize / 2 kKernelSize / 2
14 // <---------------> <---------------> <---------------> <---------------> 17 // <---------------> <---------------> <---------------> <--------------->
15 // r1_ r2_ r3_ r4_ 18 // r1_ r2_ r3_ r4_
16 // 19 //
17 // kBlockSize 20 // On the second request, block_size_ increases to request_size_ while r0_, r3_,
18 // <---------------------------------------> 21 // and r4_ slide to the right by kKernelSize / 2:
19 // r5_ 22 //
23 // |----------------|-----------------------------------------|----------------|
24 //
25 // 2nd request: request_size_
26 // <------------------ ... ----------------->
27 //
28 // block_size_ = request_size_
29 // <---------------- ... ------------------>
30 //
31 // These new regions remain constant until a Flush() occurs. While complicated,
32 // this allows us to reduce jitter by always requesting the same amount from the
33 // provided callback.
20 // 34 //
21 // The algorithm: 35 // The algorithm:
22 // 36 //
23 // 1) Consume input frames into r0_ (r1_ is zero-initialized). 37 // 1) Consume |request_size_| frames into r0_ (r1_ is zero-initialized).
24 // 2) Position kernel centered at start of r0_ (r2_) and generate output frames 38 // 2) Position kernel centered at start of r0_ (r2_) and generate output frames
25 // until kernel is centered at start of r4_ or we've finished generating all 39 // until kernel is centered at start of r4_ or we've finished generating all
26 // the output frames. 40 // the output frames.
27 // 3) Copy r3_ to r1_ and r4_ to r2_. 41 // 3) Copy r3_ to r1_, r4_ to r2_.
28 // 4) Consume input frames into r5_ (zero-pad if we run out of input). 42 // 4) If we're on the second load, set block_size_ equal to request_size_
29 // 5) Goto (2) until all of input is consumed. 43 // and reinitialize r0_, r3_, and r4_ appropriately.
44 // 5) Goto (1).
30 // 45 //
31 // Note: we're glossing over how the sub-sample handling works with 46 // Note: we're glossing over how the sub-sample handling works with
32 // |virtual_source_idx_|, etc. 47 // |virtual_source_idx_|, etc.
33 48
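As a quick sanity check on the layout described above, here is a minimal standalone sketch that computes the region offsets the same way UpdateRegions() below does. The concrete numbers (kKernelSize = 32, request_size = 512) are illustrative assumptions only, not values implied by this patch.

// Minimal sketch of the r0_..r4_ offsets for an assumed kernel and request
// size. Offsets are in frames from the start of input_buffer_.
#include <cstddef>
#include <cstdio>

int main() {
  const size_t kKernelSize = 32;    // assumed, must be even
  const size_t request_size = 512;  // assumed size of each read_cb_ request

  // First load: block_size_ = request_size_ - kKernelSize / 2.
  size_t block_size = request_size - kKernelSize / 2;
  bool reinitialized = false;
  for (int pass = 1; pass <= 2; ++pass) {
    const size_t r1 = 0;                                  // zero-initialized history
    const size_t r2 = kKernelSize / 2;                    // convolution starts here
    const size_t r0 = reinitialized ? kKernelSize : r2;   // where reads land
    const size_t r3 = r2 + block_size - kKernelSize / 2;
    const size_t r4 = r2 + block_size;
    printf("load %d: r0=%zu r1=%zu r2=%zu r3=%zu r4=%zu block_size=%zu\n",
           pass, r0, r1, r2, r3, r4, block_size);

    // From the second load on, block_size_ grows to request_size_ and r0_
    // slides right by kKernelSize / 2, as in the diagram above.
    block_size = request_size;
    reinitialized = true;
  }
  return 0;
}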
34 // MSVC++ requires this to be set before any other includes to get M_PI. 49 // MSVC++ requires this to be set before any other includes to get M_PI.
35 #define _USE_MATH_DEFINES 50 #define _USE_MATH_DEFINES
36 51
37 #include "media/base/sinc_resampler.h" 52 #include "media/base/sinc_resampler.h"
38 53
39 #include <cmath> 54 #include <cmath>
(...skipping 17 matching lines...)
57 // windowing it the transition from pass to stop does not happen right away. 72 // windowing it the transition from pass to stop does not happen right away.
58 // So we should adjust the low pass filter cutoff slightly downward to avoid 73 // So we should adjust the low pass filter cutoff slightly downward to avoid
59 // some aliasing at the very high-end. 74 // some aliasing at the very high-end.
60 // TODO(crogers): this value is empirical and to be more exact should vary 75 // TODO(crogers): this value is empirical and to be more exact should vary
61 // depending on kKernelSize. 76 // depending on kKernelSize.
62 sinc_scale_factor *= 0.9; 77 sinc_scale_factor *= 0.9;
63 78
64 return sinc_scale_factor; 79 return sinc_scale_factor;
65 } 80 }
66 81
67 SincResampler::SincResampler(double io_sample_rate_ratio, const ReadCB& read_cb) 82 SincResampler::SincResampler(double io_sample_rate_ratio, size_t request_size,
83 const ReadCB& read_cb)
68 : io_sample_rate_ratio_(io_sample_rate_ratio), 84 : io_sample_rate_ratio_(io_sample_rate_ratio),
69 virtual_source_idx_(0),
70 buffer_primed_(false),
71 read_cb_(read_cb), 85 read_cb_(read_cb),
86 request_size_(request_size),
87 input_buffer_size_(request_size_ + kKernelSize),
72 // Create input buffers with a 16-byte alignment for SSE optimizations. 88 // Create input buffers with a 16-byte alignment for SSE optimizations.
73 kernel_storage_(static_cast<float*>( 89 kernel_storage_(static_cast<float*>(
74 base::AlignedAlloc(sizeof(float) * kKernelStorageSize, 16))), 90 base::AlignedAlloc(sizeof(float) * kKernelStorageSize, 16))),
75 kernel_pre_sinc_storage_(static_cast<float*>( 91 kernel_pre_sinc_storage_(static_cast<float*>(
76 base::AlignedAlloc(sizeof(float) * kKernelStorageSize, 16))), 92 base::AlignedAlloc(sizeof(float) * kKernelStorageSize, 16))),
77 kernel_window_storage_(static_cast<float*>( 93 kernel_window_storage_(static_cast<float*>(
78 base::AlignedAlloc(sizeof(float) * kKernelStorageSize, 16))), 94 base::AlignedAlloc(sizeof(float) * kKernelStorageSize, 16))),
79 input_buffer_(static_cast<float*>( 95 input_buffer_(static_cast<float*>(
80 base::AlignedAlloc(sizeof(float) * kBufferSize, 16))), 96 base::AlignedAlloc(sizeof(float) * input_buffer_size_, 16))),
81 #if defined(ARCH_CPU_X86_FAMILY) && !defined(__SSE__) 97 #if defined(ARCH_CPU_X86_FAMILY) && !defined(__SSE__)
82 convolve_proc_(base::CPU().has_sse() ? Convolve_SSE : Convolve_C), 98 convolve_proc_(base::CPU().has_sse() ? Convolve_SSE : Convolve_C),
83 #endif 99 #endif
84 // Setup various region pointers in the buffer (see diagram above). 100 reinitialize_regions_(false) {
85 r0_(input_buffer_.get() + kKernelSize / 2), 101 Flush();
86 r1_(input_buffer_.get()), 102 CHECK_GT(block_size_, static_cast<size_t>(kKernelSize))
henrika (OOO until Aug 14) 2013/04/27 19:43:40 Thanks :-)
87 r2_(r0_), 103 << "block_size must be greater than kKernelSize!";
88 r3_(r0_ + kBlockSize - kKernelSize / 2),
89 r4_(r0_ + kBlockSize),
90 r5_(r0_ + kKernelSize / 2) {
91 // Ensure kKernelSize is a multiple of 32 for easy SSE optimizations; causes
92 // r0_ and r5_ (used for input) to always be 16-byte aligned by virtue of
93 // input_buffer_ being 16-byte aligned.
94 DCHECK_EQ(kKernelSize % 32, 0) << "kKernelSize must be a multiple of 32!";
95 DCHECK_GT(kBlockSize, kKernelSize)
96 << "kBlockSize must be greater than kKernelSize!";
97 // Basic sanity checks to ensure buffer regions are laid out correctly:
98 // r0_ and r2_ should always be the same position.
99 DCHECK_EQ(r0_, r2_);
100 // r1_ at the beginning of the buffer.
101 DCHECK_EQ(r1_, input_buffer_.get());
102 // r1_ left of r2_, r2_ left of r5_ and r1_, r2_ size correct.
103 DCHECK_EQ(r2_ - r1_, r5_ - r2_);
104 // r3_ left of r4_, r5_ left of r0_ and r3_ size correct.
105 DCHECK_EQ(r4_ - r3_, r5_ - r0_);
106 // r3_, r4_ size correct and r4_ at the end of the buffer.
107 DCHECK_EQ(r4_ + (r4_ - r3_), r1_ + kBufferSize);
108 // r5_ size correct and at the end of the buffer.
109 DCHECK_EQ(r5_ + kBlockSize, r1_ + kBufferSize);
110 104
111 memset(kernel_storage_.get(), 0, 105 memset(kernel_storage_.get(), 0,
112 sizeof(*kernel_storage_.get()) * kKernelStorageSize); 106 sizeof(*kernel_storage_.get()) * kKernelStorageSize);
113 memset(kernel_pre_sinc_storage_.get(), 0, 107 memset(kernel_pre_sinc_storage_.get(), 0,
114 sizeof(*kernel_pre_sinc_storage_.get()) * kKernelStorageSize); 108 sizeof(*kernel_pre_sinc_storage_.get()) * kKernelStorageSize);
115 memset(kernel_window_storage_.get(), 0, 109 memset(kernel_window_storage_.get(), 0,
116 sizeof(*kernel_window_storage_.get()) * kKernelStorageSize); 110 sizeof(*kernel_window_storage_.get()) * kKernelStorageSize);
117 memset(input_buffer_.get(), 0, sizeof(*input_buffer_.get()) * kBufferSize);
118 111
119 InitializeKernel(); 112 InitializeKernel();
120 } 113 }
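The constructor relies on base::AlignedAlloc to give every float buffer 16-byte alignment so the SSE path can use aligned loads. A rough standalone stand-in using C++17's std::aligned_alloc (an assumption for illustration, not what Chromium uses) could look like:

// Illustrative 16-byte-aligned float allocation; free the result with
// std::free(). std::aligned_alloc requires the size to be a multiple of the
// alignment, so round the byte count up first.
#include <cassert>
#include <cstdint>
#include <cstdlib>

float* AllocAligned16(std::size_t count) {
  std::size_t bytes = count * sizeof(float);
  bytes = (bytes + 15) & ~static_cast<std::size_t>(15);
  float* p = static_cast<float*>(std::aligned_alloc(16, bytes));
  assert((reinterpret_cast<std::uintptr_t>(p) & 0x0F) == 0);
  return p;
}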
121 114
122 SincResampler::~SincResampler() {} 115 SincResampler::~SincResampler() {}
123 116
117 void SincResampler::UpdateRegions() {
118 // Setup various region pointers in the buffer (see diagram above).
119 r1_ = input_buffer_.get();
120 r2_ = input_buffer_.get() + kKernelSize / 2;
121 r0_ = reinitialize_regions_ ? input_buffer_.get() + kKernelSize : r2_;
122 r3_ = r2_ + block_size_ - kKernelSize / 2;
123 r4_ = r2_ + block_size_;
124
125 // r1_ at the beginning of the buffer.
126 CHECK_EQ(r1_, input_buffer_.get());
127 // r1_ left of r2_, r3_ left of r4_, and the sizes of r1_ and r3_ match.
128 CHECK_EQ(r2_ - r1_, r4_ - r3_);
129 // r2_ left of r3_.
130 CHECK_LT(r2_, r3_);
131 }
132
124 void SincResampler::InitializeKernel() { 133 void SincResampler::InitializeKernel() {
125 // Blackman window parameters. 134 // Blackman window parameters.
126 static const double kAlpha = 0.16; 135 static const double kAlpha = 0.16;
127 static const double kA0 = 0.5 * (1.0 - kAlpha); 136 static const double kA0 = 0.5 * (1.0 - kAlpha);
128 static const double kA1 = 0.5; 137 static const double kA1 = 0.5;
129 static const double kA2 = 0.5 * kAlpha; 138 static const double kA2 = 0.5 * kAlpha;
130 139
131 // Generates a set of windowed sinc() kernels. 140 // Generates a set of windowed sinc() kernels.
132 // We generate a range of sub-sample offsets from 0.0 to 1.0. 141 // We generate a range of sub-sample offsets from 0.0 to 1.0.
133 const double sinc_scale_factor = SincScaleFactor(io_sample_rate_ratio_); 142 const double sinc_scale_factor = SincScaleFactor(io_sample_rate_ratio_);
(...skipping 65 matching lines...)
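The elided body of InitializeKernel() builds the table of windowed-sinc kernels, one per sub-sample offset. As a rough standalone illustration of the Blackman-windowed sinc idea (using the kAlpha/kA0/kA1/kA2 constants above, but not reproducing the per-offset or pre-sinc/window bookkeeping of the real code):

// Generic Blackman-windowed sinc low-pass kernel; a sketch only, not the
// elided Chromium implementation.
#define _USE_MATH_DEFINES
#include <cmath>
#include <vector>

std::vector<float> MakeWindowedSinc(int kernel_size, double sinc_scale_factor) {
  const double kAlpha = 0.16;
  const double kA0 = 0.5 * (1.0 - kAlpha);
  const double kA1 = 0.5;
  const double kA2 = 0.5 * kAlpha;

  std::vector<float> kernel(kernel_size);
  for (int i = 0; i < kernel_size; ++i) {
    // Ideal low-pass (sinc) centered on the middle of the kernel, scaled by
    // the cutoff factor.
    const double s = sinc_scale_factor * (i - kernel_size / 2);
    const double sinc = (s == 0.0) ? 1.0 : std::sin(M_PI * s) / (M_PI * s);

    // Blackman window across the kernel span.
    const double x = static_cast<double>(i) / kernel_size;
    const double window =
        kA0 - kA1 * std::cos(2.0 * M_PI * x) + kA2 * std::cos(4.0 * M_PI * x);

    kernel[i] = static_cast<float>(sinc_scale_factor * sinc * window);
  }
  return kernel;
}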
199 #else 208 #else
200 // Unknown architecture. 209 // Unknown architecture.
201 #define CONVOLVE_FUNC Convolve_C 210 #define CONVOLVE_FUNC Convolve_C
202 #endif 211 #endif
203 212
204 void SincResampler::Resample(float* destination, int frames) { 213 void SincResampler::Resample(float* destination, int frames) {
205 int remaining_frames = frames; 214 int remaining_frames = frames;
206 215
207 // Step (1) -- Prime the input buffer at the start of the input stream. 216 // Step (1) -- Prime the input buffer at the start of the input stream.
208 if (!buffer_primed_) { 217 if (!buffer_primed_) {
209 read_cb_.Run(r0_, kBlockSize + kKernelSize / 2); 218 read_cb_.Run(r0_, request_size_);
210 buffer_primed_ = true; 219 buffer_primed_ = true;
220 reinitialize_regions_ = true;
211 } 221 }
212 222
213 // Step (2) -- Resample! 223 // Step (2) -- Resample!
214 while (remaining_frames) { 224 while (remaining_frames) {
215 while (virtual_source_idx_ < kBlockSize) { 225 while (virtual_source_idx_ < block_size_) {
216 // |virtual_source_idx_| lies in between two kernel offsets so figure out 226 // |virtual_source_idx_| lies in between two kernel offsets so figure out
217 // what they are. 227 // what they are.
218 int source_idx = static_cast<int>(virtual_source_idx_); 228 const int source_idx = virtual_source_idx_;
219 double subsample_remainder = virtual_source_idx_ - source_idx; 229 const double subsample_remainder = virtual_source_idx_ - source_idx;
220 230
221 double virtual_offset_idx = subsample_remainder * kKernelOffsetCount; 231 const double virtual_offset_idx =
222 int offset_idx = static_cast<int>(virtual_offset_idx); 232 subsample_remainder * kKernelOffsetCount;
233 const int offset_idx = virtual_offset_idx;
223 234
224 // We'll compute "convolutions" for the two kernels which straddle 235 // We'll compute "convolutions" for the two kernels which straddle
225 // |virtual_source_idx_|. 236 // |virtual_source_idx_|.
226 float* k1 = kernel_storage_.get() + offset_idx * kKernelSize; 237 const float* k1 = kernel_storage_.get() + offset_idx * kKernelSize;
227 float* k2 = k1 + kKernelSize; 238 const float* k2 = k1 + kKernelSize;
228 239
229 // Ensure |k1|, |k2| are 16-byte aligned for SIMD usage. Should always be 240 // Ensure |k1|, |k2| are 16-byte aligned for SIMD usage. Should always be
230 // true so long as kKernelSize is a multiple of 16. 241 // true so long as kKernelSize is a multiple of 16.
231 DCHECK_EQ(0u, reinterpret_cast<uintptr_t>(k1) & 0x0F); 242 DCHECK_EQ(0u, reinterpret_cast<uintptr_t>(k1) & 0x0F);
232 DCHECK_EQ(0u, reinterpret_cast<uintptr_t>(k2) & 0x0F); 243 DCHECK_EQ(0u, reinterpret_cast<uintptr_t>(k2) & 0x0F);
233 244
234 // Initialize input pointer based on quantized |virtual_source_idx_|. 245 // Initialize input pointer based on quantized |virtual_source_idx_|.
235 float* input_ptr = r1_ + source_idx; 246 const float* input_ptr = r1_ + source_idx;
236 247
237 // Figure out how much to weight each kernel's "convolution". 248 // Figure out how much to weight each kernel's "convolution".
238 double kernel_interpolation_factor = virtual_offset_idx - offset_idx; 249 const double kernel_interpolation_factor =
250 virtual_offset_idx - offset_idx;
239 *destination++ = CONVOLVE_FUNC( 251 *destination++ = CONVOLVE_FUNC(
240 input_ptr, k1, k2, kernel_interpolation_factor); 252 input_ptr, k1, k2, kernel_interpolation_factor);
241 253
242 // Advance the virtual index. 254 // Advance the virtual index.
243 virtual_source_idx_ += io_sample_rate_ratio_; 255 virtual_source_idx_ += io_sample_rate_ratio_;
244 256
245 if (!--remaining_frames) 257 if (!--remaining_frames)
246 return; 258 return;
247 } 259 }
248 260
249 // Wrap back around to the start. 261 // Wrap back around to the start.
250 virtual_source_idx_ -= kBlockSize; 262 virtual_source_idx_ -= block_size_;
251 263
252 // Step (3) Copy r3_ to r1_ and r4_ to r2_. 264 // Step (3) -- Copy r3_, r4_ to r1_, r2_.
253 // This wraps the last input frames back to the start of the buffer. 265 // This wraps the last input frames back to the start of the buffer.
254 memcpy(r1_, r3_, sizeof(*input_buffer_.get()) * (kKernelSize / 2)); 266 memcpy(r1_, r3_, sizeof(*input_buffer_.get()) * kKernelSize);
255 memcpy(r2_, r4_, sizeof(*input_buffer_.get()) * (kKernelSize / 2));
256 267
257 // Step (4) 268 // Step (4) -- Reinitialize regions if necessary.
258 // Refresh the buffer with more input. 269 if (reinitialize_regions_) {
259 read_cb_.Run(r5_, kBlockSize); 270 block_size_ = request_size_;
271 UpdateRegions();
272 reinitialize_regions_ = false;
273 }
274
275 // Step (5) -- Refresh the buffer with more input.
276 read_cb_.Run(r0_, request_size_);
260 } 277 }
261 } 278 }
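The inner loop's index bookkeeping can be seen in isolation with a small standalone sketch (kKernelOffsetCount and the starting ratio below are assumed example values, not the real constants):

// Split the fractional read position into an integer source index, a kernel
// offset index, and a blend factor between adjacent kernel offsets: the same
// arithmetic as the loop above, without the convolution itself.
#include <cstdio>

int main() {
  const int kKernelOffsetCount = 32;                       // assumed example value
  const double io_sample_rate_ratio = 44100.0 / 48000.0;   // example ratio
  double virtual_source_idx = 0.0;

  for (int n = 0; n < 4; ++n) {
    const int source_idx = static_cast<int>(virtual_source_idx);
    const double subsample_remainder = virtual_source_idx - source_idx;

    const double virtual_offset_idx = subsample_remainder * kKernelOffsetCount;
    const int offset_idx = static_cast<int>(virtual_offset_idx);
    const double kernel_interpolation_factor = virtual_offset_idx - offset_idx;

    printf("n=%d source_idx=%d offset_idx=%d blend=%.3f\n",
           n, source_idx, offset_idx, kernel_interpolation_factor);

    virtual_source_idx += io_sample_rate_ratio;
  }
  return 0;
}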
262 279
263 #undef CONVOLVE_FUNC 280 #undef CONVOLVE_FUNC
264 281
265 int SincResampler::ChunkSize() const { 282 int SincResampler::ChunkSize() const {
266 return kBlockSize / io_sample_rate_ratio_; 283 return block_size_ / io_sample_rate_ratio_;
267 } 284 }
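For example, with a hypothetical block_size_ of 512 frames and io_sample_rate_ratio_ = 48000.0 / 44100.0 (about 1.088, i.e. downsampling 48 kHz input to 44.1 kHz output), ChunkSize() returns 512 / 1.088, truncated to 470: roughly the number of output frames one full block of input yields.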
268 285
269 void SincResampler::Flush() { 286 void SincResampler::Flush() {
270 virtual_source_idx_ = 0; 287 virtual_source_idx_ = 0;
271 buffer_primed_ = false; 288 buffer_primed_ = false;
272 memset(input_buffer_.get(), 0, sizeof(*input_buffer_.get()) * kBufferSize); 289 memset(input_buffer_.get(), 0,
290 sizeof(*input_buffer_.get()) * input_buffer_size_);
291
292 block_size_ = request_size_ - kKernelSize / 2;
293 reinitialize_regions_ = false;
294 UpdateRegions();
273 } 295 }
274 296
275 float SincResampler::Convolve_C(const float* input_ptr, const float* k1, 297 float SincResampler::Convolve_C(const float* input_ptr, const float* k1,
276 const float* k2, 298 const float* k2,
277 double kernel_interpolation_factor) { 299 double kernel_interpolation_factor) {
278 float sum1 = 0; 300 float sum1 = 0;
279 float sum2 = 0; 301 float sum2 = 0;
280 302
281 // Generate a single output sample. Unrolling this loop hurt performance in 303 // Generate a single output sample. Unrolling this loop hurt performance in
282 // local testing. 304 // local testing.
(...skipping 31 matching lines...)
314 vmulq_f32(m_sums1, vmovq_n_f32(1.0 - kernel_interpolation_factor)), 336 vmulq_f32(m_sums1, vmovq_n_f32(1.0 - kernel_interpolation_factor)),
315 m_sums2, vmovq_n_f32(kernel_interpolation_factor)); 337 m_sums2, vmovq_n_f32(kernel_interpolation_factor));
316 338
317 // Sum components together. 339 // Sum components together.
318 float32x2_t m_half = vadd_f32(vget_high_f32(m_sums1), vget_low_f32(m_sums1)); 340 float32x2_t m_half = vadd_f32(vget_high_f32(m_sums1), vget_low_f32(m_sums1));
319 return vget_lane_f32(vpadd_f32(m_half, m_half), 0); 341 return vget_lane_f32(vpadd_f32(m_half, m_half), 0);
320 } 342 }
321 #endif 343 #endif
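For reference, the two-kernel convolution that Convolve_C (elided above) and the SSE/NEON variants implement can be sketched in plain scalar C++ as follows; kernel_size is passed explicitly here instead of using kKernelSize:

// Convolve the input with both bracketing kernels, then linearly interpolate
// between the two sums using the fractional position between kernel offsets.
float ConvolveSketch(const float* input_ptr, const float* k1, const float* k2,
                     int kernel_size, double kernel_interpolation_factor) {
  float sum1 = 0;
  float sum2 = 0;
  for (int i = 0; i < kernel_size; ++i) {
    sum1 += input_ptr[i] * k1[i];
    sum2 += input_ptr[i] * k2[i];
  }
  return static_cast<float>((1.0 - kernel_interpolation_factor) * sum1 +
                            kernel_interpolation_factor * sum2);
}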
322 344
323 } // namespace media 345 } // namespace media