Index: media/filters/audio_renderer_algorithm_base.cc |
diff --git a/media/filters/audio_renderer_algorithm_base.cc b/media/filters/audio_renderer_algorithm_base.cc |
index 29bafedffaaa7293892abdb6169c9e285712ca14..2b7a340d67333950e1c9a5f3129b271cf8a35550 100644 |
--- a/media/filters/audio_renderer_algorithm_base.cc |
+++ b/media/filters/audio_renderer_algorithm_base.cc |
@@ -41,7 +41,12 @@ AudioRendererAlgorithmBase::AudioRendererAlgorithmBase() |
bytes_per_channel_(0), |
playback_rate_(0.0f), |
audio_buffer_(0, kStartingBufferSizeInBytes), |
- crossfade_size_(0), |
+ bytes_in_crossfade_(0), |
+ bytes_per_frame_(0), |
+ index_into_window_(0), |
+ crossfade_frame_number_(0), |
+ muted_(false), |
+ needs_more_data_(false), |
window_size_(0) { |
} |
@@ -84,122 +89,289 @@ void AudioRendererAlgorithmBase::Initialize( |
channels_ = channels; |
samples_per_second_ = samples_per_second; |
bytes_per_channel_ = bits_per_channel / 8; |
+ bytes_per_frame_ = bytes_per_channel_ * channels_; |
request_read_cb_ = callback; |
SetPlaybackRate(initial_playback_rate); |
window_size_ = |
samples_per_second_ * bytes_per_channel_ * channels_ * kWindowDuration; |
- AlignToSampleBoundary(&window_size_); |
+ AlignToFrameBoundary(&window_size_); |
- crossfade_size_ = |
+ bytes_in_crossfade_ = |
samples_per_second_ * bytes_per_channel_ * channels_ * kCrossfadeDuration; |
- AlignToSampleBoundary(&crossfade_size_); |
+ AlignToFrameBoundary(&bytes_in_crossfade_); |
+ |
+ crossfade_buffer_.reset(new uint8[bytes_in_crossfade_]); |
} |
-uint32 AudioRendererAlgorithmBase::FillBuffer(uint8* dest, uint32 length) { |
- if (IsQueueEmpty() || playback_rate_ == 0.0f) |
+uint32 AudioRendererAlgorithmBase::FillBuffer( |
+ uint8* dest, uint32 requested_frames) { |
+ if (playback_rate_ == 0.0f || bytes_per_frame_ == 0) |
acolwell GONE FROM CHROMIUM
2012/02/22 07:51:40
Is "bytes_per_frame == 0" necessary because Initia
vrk (LEFT CHROMIUM)
2012/02/23 20:33:06
Oops, it's not necessary! This should be a DCHECK,
|
return 0; |
- // Handle the simple case of normal playback. |
- if (playback_rate_ == 1.0f) { |
- uint32 bytes_written = |
- CopyFromAudioBuffer(dest, std::min(length, bytes_buffered())); |
- AdvanceBufferPosition(bytes_written); |
- return bytes_written; |
+ uint32 total_frames_rendered = 0; |
+ uint8* output_ptr = dest; |
+ while(total_frames_rendered < requested_frames) { |
+ if (index_into_window_ == window_size_) |
+ ResetWindow(); |
+ |
+ bool renders_frame = true; |
acolwell GONE FROM CHROMIUM
2012/02/22 07:51:40
nit: rendered_frame?
vrk (LEFT CHROMIUM)
2012/02/23 20:33:06
Done.
|
+ if (playback_rate_ > 1.0) |
+ renders_frame = OutputFasterPlayback(output_ptr); |
+ else if (playback_rate_ < 1.0) |
+ renders_frame = OutputSlowerPlayback(output_ptr); |
+ else |
+ renders_frame = OutputNormalPlayback(output_ptr); |
+ |
+ if (!renders_frame) { |
+ needs_more_data_ = true; |
+ break; |
+ } |
+ |
+ output_ptr += bytes_per_frame_; |
+ total_frames_rendered++; |
} |
+ return total_frames_rendered; |
+} |
+ |
+void AudioRendererAlgorithmBase::ResetWindow() { |
+ DCHECK_EQ(index_into_window_, window_size_); |
acolwell GONE FROM CHROMIUM
2012/02/22 07:51:40
Should this methods get called on FlushBuffers()?
vrk (LEFT CHROMIUM)
2012/02/23 20:33:06
Good catch! Yes, this should get called then. Adde
|
+ index_into_window_ = 0; |
+ crossfade_frame_number_ = 0; |
+ muted_ = false; |
+} |
+ |
+bool AudioRendererAlgorithmBase::OutputFasterPlayback(uint8* dest) { |
+ DCHECK_LT(index_into_window_, window_size_); |
+ DCHECK_GT(playback_rate_, 1.0); |
- // Output muted data when out of acceptable quality range. |
- if (playback_rate_ < kMinPlaybackRate || playback_rate_ > kMaxPlaybackRate) |
- return MuteBuffer(dest, length); |
+ if (audio_buffer_.forward_bytes() < bytes_per_frame_) |
+ return false; |
+ if (playback_rate_ > kMaxPlaybackRate) |
+ muted_ = true; |
+ |
+ // The audio data is output in a series of windows. For sped-up playback, |
+  // each window comprises the following phases: |
+ // |
+ // a) Output raw data. |
+ // b) Save bytes for crossfade in |crossfade_buffer_|. |
+ // c) Drop data. |
+ // d) Output crossfaded audio leading up to the next window. |
+ // |
+ // The duration of each phase is computed below based on the |window_size_| |
+ // and |playback_rate_|. |
uint32 input_step = window_size_; |
acolwell GONE FROM CHROMIUM
2012/02/22 07:51:40
I'm assuming we have to use uint32 everywhere beca
vrk (LEFT CHROMIUM)
2012/02/23 20:33:06
Done at top of header file.
|
+ uint32 output_step = ceil(window_size_ / playback_rate_); |
+ AlignToFrameBoundary(&output_step); |
+ DCHECK_GT(input_step, output_step); |
+ |
+ uint32 bytes_to_crossfade = bytes_in_crossfade_; |
+ if (muted_ || bytes_to_crossfade > output_step) |
+ bytes_to_crossfade = 0; |
+ |
+ // This is the index of the end of phase a, beginning of phase b. |
+ uint32 outtro_crossfade_begin = output_step - bytes_to_crossfade; |
+ |
+ // This is the index of the end of phase b, beginning of phase c. |
+ uint32 outtro_crossfade_end = output_step; |
+ |
+ // This is the index of the end of phase c, beginning of phase d. |
+ // This phase continues until |index_into_window_| reaches |window_size_|, at |
+ // which point the window restarts. |
+ uint32 intro_crossfade_begin = input_step - bytes_to_crossfade; |
+ |
+ // a) Output a raw frame if we haven't reached the crossfade section. |
+ if (index_into_window_ < outtro_crossfade_begin) { |
+ ReadRawFrame(dest); |
+ index_into_window_ += bytes_per_frame_; |
+ return true; |
+ } |
+ |
+ // b) Drop frames until we reach the intro crossfade section. |
+ while (audio_buffer_.forward_bytes() >= bytes_per_frame_ && |
acolwell GONE FROM CHROMIUM
2012/02/22 07:51:40
I think just having a loop for b) and one for c) w
vrk (LEFT CHROMIUM)
2012/02/23 20:33:06
Arrghhh I also mislabeled these comments! (Logic i
|
+ index_into_window_ < intro_crossfade_begin) { |
+ if (index_into_window_ < outtro_crossfade_end) { |
+ // c) Save crossfade frame into intermediate buffer. |
+ uint8* place_to_copy = crossfade_buffer_.get() + |
+ (index_into_window_ - outtro_crossfade_begin); |
+ ReadRawFrame(place_to_copy); |
+ } else { |
+ DropFrame(); |
+ } |
+ index_into_window_ += bytes_per_frame_; |
+ } |
+ |
+ // d) Crossfade and output frames. |
+ if (index_into_window_ < window_size_ && |
+ index_into_window_ >= intro_crossfade_begin && |
+ audio_buffer_.forward_bytes() >= bytes_per_frame_) { |
acolwell GONE FROM CHROMIUM
2012/02/22 07:51:40
Reverse condition and return early to reduce inden
vrk (LEFT CHROMIUM)
2012/02/23 20:33:06
Done.
|
+ uint32 offset_into_buffer = index_into_window_ - intro_crossfade_begin; |
+ memcpy(dest, crossfade_buffer_.get() + offset_into_buffer, |
+ bytes_per_frame_); |
+ scoped_array<uint8> intro_frame_ptr(new uint8[bytes_per_frame_]); |
+ audio_buffer_.Read(intro_frame_ptr.get(), bytes_per_frame_); |
+ OutputCrossfadedFrame(dest, intro_frame_ptr.get()); |
+ index_into_window_ += bytes_per_frame_; |
+ return true; |
+ } |
+ |
+ return false; |
+} |
+ |
+bool AudioRendererAlgorithmBase::OutputSlowerPlayback(uint8* dest) { |
+ DCHECK_LT(index_into_window_, window_size_); |
+ DCHECK_LT(playback_rate_, 1.0); |
+ DCHECK_NE(playback_rate_, 0.0); |
+ |
+ if (audio_buffer_.forward_bytes() < bytes_per_frame_) |
+ return false; |
+ |
+ if (playback_rate_ < kMinPlaybackRate) |
+ muted_ = true; |
+ |
+ // The audio data is output in a series of windows. For slowed down playback, |
+  // each window comprises the following phases: |
+ // |
+ // a) Output raw data. |
+ // b) Output and save bytes for crossfade in |crossfade_buffer_|. |
+ // c) Output raw data. |
+ // d) Output crossfaded audio leading up to the next window. |
+ // |
+  // Phases c) and d) do not advance |audio_buffer_|'s cursor so that the |
+ // |audio_buffer_|'s cursor is in the correct place for the next window. |
+ // |
+ // The duration of each phase is computed below based on the |window_size_| |
+ // and |playback_rate_|. |
+ uint32 input_step = ceil(window_size_ * playback_rate_); |
+ AlignToFrameBoundary(&input_step); |
uint32 output_step = window_size_; |
+ DCHECK_LT(input_step, output_step); |
- if (playback_rate_ > 1.0f) { |
- // Playback is faster than normal; need to squish output! |
- output_step = ceil(window_size_ / playback_rate_); |
- } else { |
- // Playback is slower than normal; need to stretch input! |
- input_step = ceil(window_size_ * playback_rate_); |
+ uint32 bytes_to_crossfade = bytes_in_crossfade_; |
+ if (muted_ || bytes_to_crossfade > input_step) |
+ bytes_to_crossfade = 0; |
+ |
+ // This is the index of the end of phase a, beginning of phase b. |
+ uint32 intro_crossfade_begin = input_step - bytes_to_crossfade; |
+ |
+ // This is the index of the end of phase b, beginning of phase c. |
+ uint32 intro_crossfade_end = input_step; |
+ |
+ // This is the index of the end of phase c, beginning of phase d. |
+ // This phase continues until |index_into_window_| reaches |window_size_|, at |
+ // which point the window restarts. |
+ uint32 outtro_crossfade_begin = output_step - bytes_to_crossfade; |
+ |
+ // a) Output raw frame. |
+ if (index_into_window_ < intro_crossfade_begin) { |
+ ReadRawFrame(dest); |
+ index_into_window_ += bytes_per_frame_; |
+ return true; |
} |
- AlignToSampleBoundary(&input_step); |
- AlignToSampleBoundary(&output_step); |
- DCHECK_LE(crossfade_size_, input_step); |
- DCHECK_LE(crossfade_size_, output_step); |
+ // b) Output and save raw frames that will make up the intro crossfade |
+ // section. |
+ if (index_into_window_ < intro_crossfade_end) { |
+ uint32 offset = index_into_window_ - intro_crossfade_begin; |
+ uint8* place_to_copy = crossfade_buffer_.get() + offset; |
+ PeekRawFrame(place_to_copy); |
+ ReadRawFrame(dest); |
+ index_into_window_ += bytes_per_frame_; |
+ return true; |
+ } |
- uint32 bytes_written = 0; |
- uint32 bytes_left_to_output = length; |
- uint8* output_ptr = dest; |
+ uint32 audio_buffer_offset = index_into_window_ - intro_crossfade_end; |
- // TODO(vrk): The while loop and if test below are lame! We are requiring the |
- // client to provide us with enough data to output only complete crossfaded |
- // windows. Instead, we should output as much data as we can, and add state to |
- // keep track of what point in the crossfade we are at. |
- // This is also the cause of crbug.com/108239. |
- while (bytes_left_to_output >= output_step) { |
- // If there is not enough data buffered to complete an iteration of the |
- // loop, mute the remaining and break. |
- if (bytes_buffered() < window_size_) { |
- bytes_written += MuteBuffer(output_ptr, bytes_left_to_output); |
- break; |
+ // c) Output more raw frames. |
acolwell GONE FROM CHROMIUM
2012/02/22 07:51:40
nit: Perhaps " Output more raw frames w/o advancin
vrk (LEFT CHROMIUM)
2012/02/23 20:33:06
Done, and also added asterisks to the function-lev
|
+ if (audio_buffer_.forward_bytes() >= audio_buffer_offset + bytes_per_frame_) { |
acolwell GONE FROM CHROMIUM
2012/02/22 07:51:40
Reverse test and return early to reduce indenting.
vrk (LEFT CHROMIUM)
2012/02/23 20:33:06
Done.
|
+ DCHECK_GE(index_into_window_, intro_crossfade_end); |
+ PeekRawFrame(dest, audio_buffer_offset); |
+ |
+ // d) Crossfade the next frame of |crossfade_buffer_| into |dest|. |
+ if (index_into_window_ >= outtro_crossfade_begin) { |
+ uint32 offset_into_crossfade_buffer = |
+ index_into_window_ - outtro_crossfade_begin; |
+ uint8* intro_frame_ptr = |
+ crossfade_buffer_.get() + offset_into_crossfade_buffer; |
+ OutputCrossfadedFrame(dest, intro_frame_ptr); |
} |
- // Copy |output_step| bytes into destination buffer. |
- uint32 copied = CopyFromAudioBuffer(output_ptr, output_step); |
- DCHECK_EQ(copied, output_step); |
- output_ptr += output_step; |
- bytes_written += copied; |
- bytes_left_to_output -= copied; |
- |
- // Copy the |crossfade_size_| bytes leading up to the next window that will |
- // be played into an intermediate buffer. This will be used to crossfade |
- // from the current window to the next. |
- AdvanceBufferPosition(input_step - crossfade_size_); |
- scoped_array<uint8> next_window_intro(new uint8[crossfade_size_]); |
- uint32 bytes_copied = |
- CopyFromAudioBuffer(next_window_intro.get(), crossfade_size_); |
- DCHECK_EQ(bytes_copied, crossfade_size_); |
- AdvanceBufferPosition(crossfade_size_); |
- |
- // Prepare pointers to end of the current window and the start of the next |
- // window. |
- uint8* start_of_outro = output_ptr - crossfade_size_; |
- const uint8* start_of_intro = next_window_intro.get(); |
- |
- // Do crossfade! |
- Crossfade(crossfade_size_, channels_, bytes_per_channel_, |
- start_of_intro, start_of_outro); |
+ index_into_window_ += bytes_per_frame_; |
+ return true; |
} |
- return bytes_written; |
+ return false; |
} |
-uint32 AudioRendererAlgorithmBase::MuteBuffer(uint8* dest, uint32 length) { |
- DCHECK_NE(playback_rate_, 0.0); |
- // Note: This may not play at the speed requested as we can only consume as |
- // much data as we have, and audio timestamps drive the pipeline clock. |
- // |
- // Furthermore, we won't end up scaling the very last bit of audio, but |
- // we're talking about <8ms of audio data. |
- |
- // Cap the |input_step| by the amount of bytes buffered. |
- uint32 input_step = |
- std::min(static_cast<uint32>(length * playback_rate_), bytes_buffered()); |
- uint32 output_step = input_step / playback_rate_; |
- AlignToSampleBoundary(&input_step); |
- AlignToSampleBoundary(&output_step); |
- |
- DCHECK_LE(output_step, length); |
- if (output_step > length) { |
- LOG(ERROR) << "OLA: output_step (" << output_step << ") calculated to " |
- << "be larger than destination length (" << length << ")"; |
- output_step = length; |
+bool AudioRendererAlgorithmBase::OutputNormalPlayback(uint8* dest) { |
+ if (audio_buffer_.forward_bytes() >= bytes_per_frame_) { |
+ ReadRawFrame(dest); |
+ index_into_window_ += bytes_per_frame_; |
+ return true; |
+ } |
+ return false; |
+} |
+ |
+void AudioRendererAlgorithmBase::ReadRawFrame(uint8* dest) { |
+ PeekRawFrame(dest); |
+ DropFrame(); |
+} |
+ |
+void AudioRendererAlgorithmBase::PeekRawFrame(uint8* dest) { |
+ PeekRawFrame(dest, 0); |
+} |
+ |
+void AudioRendererAlgorithmBase::PeekRawFrame(uint8* dest, uint32 offset) { |
+ if (!muted_) { |
acolwell GONE FROM CHROMIUM
2012/02/22 07:51:40
nit: Reverse test, do memset, & return early. Unin
vrk (LEFT CHROMIUM)
2012/02/23 20:33:06
Done.
|
+ uint32 copied = audio_buffer_.Peek(dest, bytes_per_frame_, offset); |
+ DCHECK_EQ(bytes_per_frame_, copied); |
+ } else { |
+ memset(dest, 0, bytes_per_frame_); |
} |
+} |
+ |
+void AudioRendererAlgorithmBase::DropFrame() { |
+ audio_buffer_.Seek(bytes_per_frame_); |
- memset(dest, 0, output_step); |
- AdvanceBufferPosition(input_step); |
- return output_step; |
+ if (!IsQueueFull()) |
+ request_read_cb_.Run(); |
+} |
+ |
+void AudioRendererAlgorithmBase::OutputCrossfadedFrame( |
+ uint8* outtro, const uint8* intro) { |
+ DCHECK_LE(index_into_window_, window_size_); |
+ DCHECK(!muted_); |
+ |
+ switch (bytes_per_channel_) { |
+ case 4: |
+ CrossfadeFrame(reinterpret_cast<int32*>(outtro), |
acolwell GONE FROM CHROMIUM
2012/02/22 07:51:40
You can change this to CrossfadeFrame<int32>(outtr
vrk (LEFT CHROMIUM)
2012/02/23 20:33:06
I tried and failed at this! It compiles, it looks
|
+ reinterpret_cast<const int32*>(intro)); |
+ break; |
+ case 2: |
+ CrossfadeFrame(reinterpret_cast<int16*>(outtro), |
+ reinterpret_cast<const int16*>(intro)); |
+ break; |
+ case 1: |
+ CrossfadeFrame(outtro, intro); |
+ break; |
+ default: |
+ NOTREACHED() << "Unsupported audio bit depth in crossfade."; |
+ } |
+} |
+ |
+template <class Type> |
+void AudioRendererAlgorithmBase::CrossfadeFrame( |
+ Type* outtro, const Type* intro) { |
+ uint32 frames_in_crossfade = bytes_in_crossfade_ / bytes_per_frame_; |
+ float crossfade_ratio = |
+ static_cast<float>(crossfade_frame_number_) / frames_in_crossfade; |
+ for (int channel = 0; channel < channels_; ++channel) { |
+ *outtro = (*outtro) * (1.0 - crossfade_ratio) + (*intro) * crossfade_ratio; |
+ outtro++; |
acolwell GONE FROM CHROMIUM
2012/02/22 07:51:40
nit: *outtro++ = ... *intro++
vrk (LEFT CHROMIUM)
2012/02/23 20:33:06
I don't think I can do that, since I want to incre
|
+ intro++; |
+ } |
+ crossfade_frame_number_++; |
} |
void AudioRendererAlgorithmBase::SetPlaybackRate(float new_rate) { |
@@ -207,8 +379,8 @@ void AudioRendererAlgorithmBase::SetPlaybackRate(float new_rate) { |
playback_rate_ = new_rate; |
} |
-void AudioRendererAlgorithmBase::AlignToSampleBoundary(uint32* value) { |
- (*value) -= ((*value) % (channels_ * bytes_per_channel_)); |
+void AudioRendererAlgorithmBase::AlignToFrameBoundary(uint32* value) { |
+ (*value) -= ((*value) % bytes_per_frame_); |
} |
void AudioRendererAlgorithmBase::FlushBuffers() { |
@@ -222,15 +394,19 @@ base::TimeDelta AudioRendererAlgorithmBase::GetTime() { |
} |
void AudioRendererAlgorithmBase::EnqueueBuffer(Buffer* buffer_in) { |
- // If we're at end of stream, |buffer_in| contains no data. |
- if (!buffer_in->IsEndOfStream()) |
- audio_buffer_.Append(buffer_in); |
+ DCHECK(!buffer_in->IsEndOfStream()); |
+ audio_buffer_.Append(buffer_in); |
+ needs_more_data_ = false; |
// If we still don't have enough data, request more. |
if (!IsQueueFull()) |
request_read_cb_.Run(); |
} |
+bool AudioRendererAlgorithmBase::NeedsMoreData() { |
+ return needs_more_data_; |
+} |
+ |
bool AudioRendererAlgorithmBase::IsQueueEmpty() { |
return audio_buffer_.forward_bytes() == 0; |
} |
@@ -248,16 +424,4 @@ void AudioRendererAlgorithmBase::IncreaseQueueCapacity() { |
std::min(2 * audio_buffer_.forward_capacity(), kMaxBufferSizeInBytes)); |
} |
-void AudioRendererAlgorithmBase::AdvanceBufferPosition(uint32 bytes) { |
- audio_buffer_.Seek(bytes); |
- |
- if (!IsQueueFull()) |
- request_read_cb_.Run(); |
-} |
- |
-uint32 AudioRendererAlgorithmBase::CopyFromAudioBuffer( |
- uint8* dest, uint32 bytes) { |
- return audio_buffer_.Peek(dest, bytes); |
-} |
- |
} // namespace media |