Chromium Code Reviews
| Index: media/filters/audio_renderer_algorithm.cc |
| diff --git a/media/filters/audio_renderer_algorithm.cc b/media/filters/audio_renderer_algorithm.cc |
| index 97f0811384159ff35868bb45f10eabdc7a94d86b..aa48ff360aa3de5e6eb00d20893b84d2e8cc01b4 100644 |
| --- a/media/filters/audio_renderer_algorithm.cc |
| +++ b/media/filters/audio_renderer_algorithm.cc |
| @@ -12,41 +12,47 @@ |
| #include "media/audio/audio_util.h" |
| #include "media/base/audio_buffer.h" |
| #include "media/base/audio_bus.h" |
| +#include "media/filters/wsola_internals.h" |
| namespace media { |
| -// The starting size in frames for |audio_buffer_|. Previous usage maintained a |
| -// queue of 16 AudioBuffers, each of 512 frames. This worked well, so we |
| -// maintain this number of frames. |
| -static const int kStartingBufferSizeInFrames = 16 * 512; |
| - |
| // The maximum size in frames for the |audio_buffer_|. Arbitrarily determined. |
| // This number represents 3 seconds of 96kHz/16 bit 7.1 surround sound. |
| static const int kMaxBufferSizeInFrames = 3 * 96000; |
| -// Duration of audio segments used for crossfading (in seconds). |
| -static const double kWindowDuration = 0.08; |
| - |
| -// Duration of crossfade between audio segments (in seconds). |
| -static const double kCrossfadeDuration = 0.008; |
| - |
| // Max/min supported playback rates for fast/slow audio. Audio outside of this |
| // range is muted. |
| // Audio at these speeds would sound better under a frequency-domain algorithm. |
| static const float kMinPlaybackRate = 0.5f; |
| static const float kMaxPlaybackRate = 4.0f; |
| +// Overlap-and-add window size in milliseconds. |
| +static const int kOlaWindowSizeMs = 20; |
| + |
| +// Size of the search interval in milliseconds. The search interval is |
| +// [-delta, delta] around |output_index_| * |playback_rate_|, so its total |
| +// length is 2 * delta. |
| +static const int kWsolaSearchIntervalMs = 30; |
| + |
| +// The starting size in frames for |audio_buffer_|. Previous usage maintained a |
| +// queue of 16 AudioBuffers, each of 512 frames. This worked well, so we |
| +// maintain this number of frames. |
| +static const int kStartingBufferSizeInFrames = 16 * 512; |
| + |
| AudioRendererAlgorithm::AudioRendererAlgorithm() |
| : channels_(0), |
| samples_per_second_(0), |
| playback_rate_(0), |
| - frames_in_crossfade_(0), |
| - index_into_window_(0), |
| - crossfade_frame_number_(0), |
| muted_(false), |
| muted_partial_frame_(0), |
| - window_size_(0), |
| - capacity_(kStartingBufferSizeInFrames) { |
| + capacity_(kStartingBufferSizeInFrames), |
| + output_index_(0), |
| + search_block_center_offset_(0), |
| + num_candidate_blocks_(0), |
| + target_block_index_(0), |
| + ola_window_size_(0), |
| + ola_hop_size_(0), |
| + num_complete_frames_(0) { |
| } |
| AudioRendererAlgorithm::~AudioRendererAlgorithm() {} |
| @@ -58,16 +64,58 @@ void AudioRendererAlgorithm::Initialize(float initial_playback_rate, |
| channels_ = params.channels(); |
| samples_per_second_ = params.sample_rate(); |
| SetPlaybackRate(initial_playback_rate); |
| - |
| - window_size_ = samples_per_second_ * kWindowDuration; |
| - frames_in_crossfade_ = samples_per_second_ * kCrossfadeDuration; |
| - crossfade_buffer_ = AudioBus::Create(channels_, frames_in_crossfade_); |
| + num_candidate_blocks_ = (kWsolaSearchIntervalMs * samples_per_second_) / 1000; |
| + ola_window_size_ = kOlaWindowSizeMs * samples_per_second_ / 1000; |
| + |
| + // Make sure the window size is an even number. |
| + ola_window_size_ += ola_window_size_ & 1; |
| + ola_hop_size_ = ola_window_size_ / 2; |
| + |
| + // |num_candidate_blocks_| / 2 is the offset from the center of the search |
| + // block to the center of the first (leftmost) candidate block. The offset |
| + // from the center of a candidate block to its leftmost point is |
| + // |ola_window_size_| / 2 - 1. Note that |ola_window_size_| is even and, in |
| + // our convention, the center belongs to the left half, so we subtract one |
| + // frame to get the correct offset. |
| + // |
| + // Search Block |
| + // <-------------------------------------------> |
| + // |
| + // |ola_window_size_| / 2 - 1 |
| + // <---- |
| + // |
| + // |num_candidate_blocks_| / 2 |
| + // <---------------- |
| + // center |
| + // X----X----------------X---------------X-----X |
| + // <----------> <----------> |
| + // Candidate ... Candidate |
| + // 1, ... |num_candidate_blocks_| |
| + search_block_center_offset_ = num_candidate_blocks_ / 2 + |
| + (ola_window_size_ / 2 - 1); |
| + |
| + ola_window_.reset(new float[ola_window_size_]); |
| + internal::GetSymmetricHanningWindow(ola_window_size_, ola_window_.get()); |
| + |
| + transition_window_.reset(new float[ola_window_size_ * 2]); |
| + internal::GetSymmetricHanningWindow(2 * ola_window_size_, |
| + transition_window_.get()); |
| + |
| + wsola_output_ = AudioBus::Create(channels_, ola_window_size_ + ola_hop_size_); |
| + |
| + // Auxiliary containers. |
| + optimal_block_ = AudioBus::Create(channels_, ola_window_size_); |
| + search_block_ = AudioBus::Create( |
| + channels_, num_candidate_blocks_ + (ola_window_size_ - 1)); |
| + target_block_ = AudioBus::Create(channels_, ola_window_size_); |
| } |
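For concreteness, here is a small standalone sketch of the millisecond-to-frame arithmetic in Initialize(), assuming a hypothetical 44.1 kHz stream; the sample rate and the resulting numbers are illustrative only and not taken from the patch.

  #include <cstdio>

  int main() {
    const int sample_rate = 44100;  // Assumed for illustration, not from the patch.
    const int kOlaWindowSizeMs = 20;
    const int kWsolaSearchIntervalMs = 30;

    int num_candidate_blocks = (kWsolaSearchIntervalMs * sample_rate) / 1000;  // 1323
    int ola_window_size = kOlaWindowSizeMs * sample_rate / 1000;               // 882
    ola_window_size += ola_window_size & 1;  // Already even, so unchanged.
    const int ola_hop_size = ola_window_size / 2;                              // 441
    const int search_block_center_offset =
        num_candidate_blocks / 2 + (ola_window_size / 2 - 1);                  // 661 + 440 = 1101
    printf("window=%d hop=%d candidates=%d center_offset=%d\n",
           ola_window_size, ola_hop_size, num_candidate_blocks,
           search_block_center_offset);
    return 0;
  }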
| int AudioRendererAlgorithm::FillBuffer(AudioBus* dest, int requested_frames) { |
| if (playback_rate_ == 0) |
| return 0; |
| + DCHECK_EQ(channels_, dest->channels()); |
| + |
| // Optimize the |muted_| case to issue a single clear instead of performing |
| // the full crossfade and clearing each crossfaded frame. |
| if (muted_) { |
| @@ -93,12 +141,12 @@ int AudioRendererAlgorithm::FillBuffer(AudioBus* dest, int requested_frames) { |
| return frames_to_render; |
| } |
| - int slower_step = ceil(window_size_ * playback_rate_); |
| - int faster_step = ceil(window_size_ / playback_rate_); |
| + int slower_step = ceil(ola_window_size_ * playback_rate_); |
| + int faster_step = ceil(ola_window_size_ / playback_rate_); |
| // Optimize the most common |playback_rate_| ~= 1 case to use a single copy |
| // instead of copying frame by frame. |
| - if (window_size_ <= faster_step && slower_step >= window_size_) { |
| + if (ola_window_size_ <= faster_step && slower_step >= ola_window_size_) { |
| const int frames_to_copy = |
| std::min(audio_buffer_.frames(), requested_frames); |
| const int frames_read = audio_buffer_.ReadFrames(frames_to_copy, 0, dest); |
| @@ -106,277 +154,225 @@ int AudioRendererAlgorithm::FillBuffer(AudioBus* dest, int requested_frames) { |
| return frames_read; |
| } |
| - int total_frames_rendered = 0; |
| - while (total_frames_rendered < requested_frames) { |
| - if (index_into_window_ >= window_size_) |
| - ResetWindow(); |
| - |
| - int rendered_frames = 0; |
| - if (window_size_ > faster_step) { |
| - rendered_frames = |
| - OutputFasterPlayback(dest, |
| - total_frames_rendered, |
| - requested_frames - total_frames_rendered, |
| - window_size_, |
| - faster_step); |
| - } else if (slower_step < window_size_) { |
| - rendered_frames = |
| - OutputSlowerPlayback(dest, |
| - total_frames_rendered, |
| - requested_frames - total_frames_rendered, |
| - slower_step, |
| - window_size_); |
| - } else { |
| - NOTREACHED(); |
| - } |
| + int rendered_frames = 0; |
| + do { |
| + rendered_frames += WriteCompletedFramesTo( |
| + requested_frames - rendered_frames, rendered_frames, dest); |
| + } while (rendered_frames < requested_frames && WsolaIteration()); |
| + return rendered_frames; |
| +} |
| - if (rendered_frames == 0) |
| - break; |
| +void AudioRendererAlgorithm::SetPlaybackRate(float new_rate) { |
| + DCHECK_GE(new_rate, 0); |
| + // Round it to two decimal digits. |
|
DaleCurtis
2013/08/13 21:11:04
Is this really necessary? I don't really follow w
turaj
2013/08/16 22:13:56
I will rephrase the comment. Necessary in the sens
|
| + playback_rate_ = floor(new_rate * 100.f + 0.5f) / 100; |
| + muted_ = |
| + playback_rate_ < kMinPlaybackRate || playback_rate_ > kMaxPlaybackRate; |
| +} |
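The thread above asks why the rate is rounded; a tiny standalone example of what floor(new_rate * 100.f + 0.5f) / 100 produces for a few made-up rates. Keeping |playback_rate_| * 100 an integer is the assumption RemoveOldInputFrames() documents further down when it removes whole blocks of 100 * |playback_rate_| input frames.

  #include <cmath>
  #include <cstdio>

  // Illustrative inputs only; they are not taken from the patch.
  int main() {
    const float rates[] = {1.0f, 1.2345f, 0.333f, 2.999f};
    for (float rate : rates) {
      const float rounded = std::floor(rate * 100.f + 0.5f) / 100;
      printf("%.4f -> %.2f\n", rate, rounded);  // 1.00, 1.23, 0.33, 3.00
    }
    return 0;
  }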
| - total_frames_rendered += rendered_frames; |
| - } |
| - return total_frames_rendered; |
| +void AudioRendererAlgorithm::FlushBuffers() { |
| + // Clear the queue of decoded packets (releasing the buffers). |
| + audio_buffer_.Clear(); |
| + output_index_ = 0; |
| + target_block_index_ = 0; |
| + wsola_output_->Zero(); |
| + num_complete_frames_ = 0; |
| } |
| -void AudioRendererAlgorithm::ResetWindow() { |
| - DCHECK_LE(index_into_window_, window_size_); |
| - index_into_window_ = 0; |
| - crossfade_frame_number_ = 0; |
| +base::TimeDelta AudioRendererAlgorithm::GetTime() { |
| + return audio_buffer_.current_time(); |
| } |
| -int AudioRendererAlgorithm::OutputFasterPlayback(AudioBus* dest, |
| - int dest_offset, |
| - int requested_frames, |
| - int input_step, |
| - int output_step) { |
| - // Ensure we don't run into OOB read/write situation. |
| - CHECK_GT(input_step, output_step); |
| - DCHECK_LT(index_into_window_, window_size_); |
| - DCHECK_GT(playback_rate_, 1.0); |
| - DCHECK(!muted_); |
| - |
| - if (audio_buffer_.frames() < 1) |
| - return 0; |
| +void AudioRendererAlgorithm::EnqueueBuffer( |
| + const scoped_refptr<AudioBuffer>& buffer_in) { |
| + DCHECK(!buffer_in->end_of_stream()); |
| + audio_buffer_.Append(buffer_in); |
| +} |
| - // The audio data is output in a series of windows. For sped-up playback, |
| - // the window is comprised of the following phases: |
| - // |
| - // a) Output raw data. |
| - // b) Save bytes for crossfade in |crossfade_buffer_|. |
| - // c) Drop data. |
| - // d) Output crossfaded audio leading up to the next window. |
| - // |
| - // The duration of each phase is computed below based on the |window_size_| |
| - // and |playback_rate_|. |
| - DCHECK_LE(frames_in_crossfade_, output_step); |
| +bool AudioRendererAlgorithm::IsQueueFull() { |
| + return audio_buffer_.frames() >= capacity_; |
| +} |
| - // This is the index of the end of phase a, beginning of phase b. |
| - int outtro_crossfade_begin = output_step - frames_in_crossfade_; |
| +void AudioRendererAlgorithm::IncreaseQueueCapacity() { |
| + capacity_ = std::min(2 * capacity_, kMaxBufferSizeInFrames); |
| +} |
| - // This is the index of the end of phase b, beginning of phase c. |
| - int outtro_crossfade_end = output_step; |
| +bool AudioRendererAlgorithm::CanPerformWsola() const { |
| + const int search_block_size = num_candidate_blocks_ + (ola_window_size_ - 1); |
| + const int frames = audio_buffer_.frames(); |
| + if (target_block_index_ + ola_window_size_ <= frames && |
|
DaleCurtis
2013/08/13 21:11:04
Just return instead of if?
turaj
2013/08/16 22:13:56
Done.
|
| + GetSearchRegionIndex() + search_block_size <= frames) { |
| + return true; |
| + } |
| + return false; |
| +} |
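DaleCurtis suggests returning the condition directly and turaj replies "Done"; the later patch set is not shown here, so this is only a sketch of the shape that change presumably takes:

  bool AudioRendererAlgorithm::CanPerformWsola() const {
    const int search_block_size = num_candidate_blocks_ + (ola_window_size_ - 1);
    const int frames = audio_buffer_.frames();
    // Enough data must be buffered to read both the target block and the
    // whole search block.
    return target_block_index_ + ola_window_size_ <= frames &&
           GetSearchRegionIndex() + search_block_size <= frames;
  }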
| - // This is the index of the end of phase c, beginning of phase d. |
| - // This phase continues until |index_into_window_| reaches |window_size_|, at |
| - // which point the window restarts. |
| - int intro_crossfade_begin = input_step - frames_in_crossfade_; |
| +bool AudioRendererAlgorithm::WsolaIteration() { |
| + if (!CanPerformWsola()) |
| + return false; |
| + |
| + if (!GetOptimalBlock()) |
| + return false; // We cannot continue as |optimal_block_| was not |
| + // found; there was not enough data. |
| + |
| + // Overlap-and-add. |
| + for (int k = 0; k < channels_; ++k) { |
| + float* ch_opt_frame = optimal_block_->channel(k); |
|
DaleCurtis
2013/08/13 21:11:04
const float* const ?
turaj
2013/08/16 22:13:56
Done.
|
| + float* ch_output = wsola_output_->channel(k) + num_complete_frames_; |
| + for (int n = 0; n < ola_hop_size_; ++n) { |
| + ch_output[n] = ch_output[n] * ola_window_[ola_hop_size_ + n] + |
| + ch_opt_frame[n] * ola_window_[n]; |
| + } |
| - // a) Output raw frames if we haven't reached the crossfade section. |
| - if (index_into_window_ < outtro_crossfade_begin) { |
| - // Read as many frames as we can and return the count. If it's not enough, |
| - // we will get called again. |
| - const int frames_to_copy = |
| - std::min(requested_frames, outtro_crossfade_begin - index_into_window_); |
| - int copied = audio_buffer_.ReadFrames(frames_to_copy, dest_offset, dest); |
| - index_into_window_ += copied; |
| - return copied; |
| + // Copy the second half to the output. |
| + memcpy(&ch_output[ola_hop_size_], &ch_opt_frame[ola_hop_size_], |
| + sizeof(*ch_opt_frame) * ola_hop_size_); |
| } |
| - // b) Save outtro crossfade frames into intermediate buffer, but do not output |
| - // anything to |dest|. |
| - if (index_into_window_ < outtro_crossfade_end) { |
| - // This phase only applies if there are bytes to crossfade. |
| - DCHECK_GT(frames_in_crossfade_, 0); |
| - int crossfade_start = index_into_window_ - outtro_crossfade_begin; |
| - int crossfade_count = outtro_crossfade_end - index_into_window_; |
| - int copied = audio_buffer_.ReadFrames( |
| - crossfade_count, crossfade_start, crossfade_buffer_.get()); |
| - index_into_window_ += copied; |
| - |
| - // Did we get all the frames we need? If not, return and let subsequent |
| - // calls try to get the rest. |
| - if (copied != crossfade_count) |
| - return 0; |
| - } |
| + num_complete_frames_ += ola_hop_size_; |
| + output_index_ += ola_hop_size_; |
| - // c) Drop frames until we reach the intro crossfade section. |
| - if (index_into_window_ < intro_crossfade_begin) { |
| - // Check if there is enough data to skip all the frames needed. If not, |
| - // return 0 and let subsequent calls try to skip it all. |
| - int seek_frames = intro_crossfade_begin - index_into_window_; |
| - if (audio_buffer_.frames() < seek_frames) |
| - return 0; |
| - audio_buffer_.SeekFrames(seek_frames); |
| + RemoveOldInputFrames(); |
| + return true; |
| +} |
| - // We've dropped all the frames that need to be dropped. |
| - index_into_window_ += seek_frames; |
| - } |
| +int AudioRendererAlgorithm::GetSearchRegionIndex() const { |
|
DaleCurtis
2013/08/13 21:11:04
You call this pretty frequently, it might be worth
turaj
2013/08/16 22:13:56
I did that, but it feels a bit unsafe as one has t
DaleCurtis
2013/08/19 22:15:23
Make this UpdateOutputIndex() and document on |out
turaj
2013/08/21 01:01:19
Done.
|
| + // Center of the search region, in frames. |
| + const int search_block_center_index = static_cast<int>(floor( |
|
DaleCurtis
2013/08/13 21:11:04
no need for floor + static_cast<int>, the cast is
turaj
2013/08/16 22:13:56
I thought without cast we get warning on Windows.
DaleCurtis
2013/08/19 22:15:23
Yes, you probably need the cast, but not cast and
|
| + output_index_ * playback_rate_ + 0.5)); |
| - // d) Crossfade and output a frame, as long as we have data. |
| - if (audio_buffer_.frames() < 1) |
| - return 0; |
| - DCHECK_GT(frames_in_crossfade_, 0); |
| - DCHECK_LT(index_into_window_, window_size_); |
| - |
| - int offset_into_buffer = index_into_window_ - intro_crossfade_begin; |
| - int copied = audio_buffer_.ReadFrames(1, dest_offset, dest); |
| - DCHECK_EQ(copied, 1); |
| - CrossfadeFrame(crossfade_buffer_.get(), |
| - offset_into_buffer, |
| - dest, |
| - dest_offset, |
| - offset_into_buffer); |
| - index_into_window_ += copied; |
| - return copied; |
| + // Index of the beginning of the search region, in frames. |
| + return search_block_center_index - search_block_center_offset_; |
| } |
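Per the thread above, the floor() here is redundant: the operand is non-negative (|output_index_| and |playback_rate_| are both >= 0), so truncation by the cast already rounds the +0.5-adjusted value down. A sketch of that simplification; how the later patch set actually reads is not shown here.

  int AudioRendererAlgorithm::GetSearchRegionIndex() const {
    // Center of the search region, in frames.
    const int search_block_center_index =
        static_cast<int>(output_index_ * playback_rate_ + 0.5);
    // Index of the beginning of the search region, in frames.
    return search_block_center_index - search_block_center_offset_;
  }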
| -int AudioRendererAlgorithm::OutputSlowerPlayback(AudioBus* dest, |
| - int dest_offset, |
| - int requested_frames, |
| - int input_step, |
| - int output_step) { |
| - // Ensure we don't run into OOB read/write situation. |
| - CHECK_LT(input_step, output_step); |
| - DCHECK_LT(index_into_window_, window_size_); |
| - DCHECK_LT(playback_rate_, 1.0); |
| - DCHECK_NE(playback_rate_, 0); |
| - DCHECK(!muted_); |
| - |
| - if (audio_buffer_.frames() < 1) |
| - return 0; |
| - |
| - // The audio data is output in a series of windows. For slowed down playback, |
| - // the window is comprised of the following phases: |
| - // |
| - // a) Output raw data. |
| - // b) Output and save bytes for crossfade in |crossfade_buffer_|. |
| - // c) Output* raw data. |
| - // d) Output* crossfaded audio leading up to the next window. |
| - // |
| - // * Phases c) and d) do not progress |audio_buffer_|'s cursor so that the |
| - // |audio_buffer_|'s cursor is in the correct place for the next window. |
| - // |
| - // The duration of each phase is computed below based on the |window_size_| |
| - // and |playback_rate_|. |
| - DCHECK_LE(frames_in_crossfade_, input_step); |
| +void AudioRendererAlgorithm::RemoveOldInputFrames() { |
|
DaleCurtis
2013/08/13 21:11:04
Instead of trying to calculate how many input fram
turaj
2013/08/16 22:13:56
what we need to know is the earliest frame we need
|
| + const int earliest_used_index = std::min(target_block_index_, |
| + GetSearchRegionIndex()); |
| + |
| + if (earliest_used_index < 0) |
| + return; // Nothing to remove |
| + |
| + // Assuming |playback_rate_| * 100 == floor(|playback_rate_| * 100), |
| + // i.e. |playback_rate_| is represented by at most two decimal digits, we |
| + // remove input in blocks of 100 * |playback_rate_| frames. |
| + const int kOutputFramesPerBlock = 100; |
| + const int input_frames_per_block = |
| + static_cast<int>(floor(playback_rate_ * kOutputFramesPerBlock + 0.5f)); |
|
DaleCurtis
2013/08/13 21:11:04
Again, floor + cast is unnecessary.
turaj
2013/08/16 22:13:56
Done.
|
| + const int blocks_to_remove = earliest_used_index / input_frames_per_block; |
|
DaleCurtis
2013/08/13 21:11:04
Is num >> den, such that integer division isn't lo
turaj
2013/08/16 22:13:56
we actually need the integer part of |earliest_use
|
| + const int input_frames_to_remove = input_frames_per_block * blocks_to_remove; |
| + |
| + // Remove frames from input and adjust indices accordingly. |
| + audio_buffer_.SeekFrames(input_frames_to_remove); |
| + target_block_index_ -= input_frames_to_remove; |
| + |
| + // Adjust output index. |
| + output_index_ -= kOutputFramesPerBlock * blocks_to_remove; |
| + DCHECK_GE(output_index_, 0); |
| +} |
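A worked example of the block arithmetic above, with made-up numbers: if |playback_rate_| is 1.25 and the earliest used index is 700 frames, then input_frames_per_block = 125, blocks_to_remove = 700 / 125 = 5 (integer division), so 625 input frames are seeked past, |target_block_index_| drops by 625, and |output_index_| drops by 5 * 100 = 500 output frames.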
| - // This is the index of the end of phase a, beginning of phase b. |
| - int intro_crossfade_begin = input_step - frames_in_crossfade_; |
| +int AudioRendererAlgorithm::WriteCompletedFramesTo( |
| + int requested_frames, int dest_offset, AudioBus* dest) { |
| + int rendered_frames = std::min(num_complete_frames_, requested_frames); |
| - // This is the index of the end of phase b, beginning of phase c. |
| - int intro_crossfade_end = input_step; |
| + if (rendered_frames == 0) |
| + return 0; // There is nothing to read from |wsola_output_|, return. |
| - // This is the index of the end of phase c, beginning of phase d. |
| - // This phase continues until |index_into_window_| reaches |window_size_|, at |
| - // which point the window restarts. |
| - int outtro_crossfade_begin = output_step - frames_in_crossfade_; |
| + wsola_output_->CopyPartialFramesTo(0, rendered_frames, dest_offset, dest); |
| - // a) Output raw frames. |
| - if (index_into_window_ < intro_crossfade_begin) { |
| - // Read as many frames as we can and return the count. If it's not enough, |
| - // we will get called again. |
| - const int frames_to_copy = |
| - std::min(requested_frames, intro_crossfade_begin - index_into_window_); |
| - int copied = audio_buffer_.ReadFrames(frames_to_copy, dest_offset, dest); |
| - index_into_window_ += copied; |
| - return copied; |
| + // Remove the frames that have been read. |
| + int frames_to_move = wsola_output_->frames() - rendered_frames; |
|
DaleCurtis
2013/08/13 21:11:04
You should be able to use the Copy helpers in Audi
turaj
2013/08/16 22:13:56
CopyTo() uses memcpy() and I'm concerned about the
|
| + for (int k = 0; k < channels_; ++k) { |
| + float* ch = wsola_output_->channel(k); |
| + memmove(ch, &ch[rendered_frames], sizeof(*ch) * frames_to_move); |
|
DaleCurtis
2013/08/13 21:11:04
Necessary vs memcpy? They don't seem to overlap?
turaj
2013/08/16 22:13:56
I'm not expecting to be zeroed, and there is no gu
|
| } |
| + num_complete_frames_ -= rendered_frames; |
| + return rendered_frames; |
| +} |
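A worked example of why the shift above uses memmove() rather than memcpy() (sizes are made up, reusing the 44.1 kHz sketch): with |wsola_output_| holding 882 + 441 = 1323 frames and rendered_frames = 300, the shift reads frames [300, 1323) and writes frames [0, 1023) of the same channel buffer. Source and destination overlap whenever rendered_frames < frames_to_move, so memcpy() would be undefined behavior there and memmove() is the safe choice, which matches turaj's reply in the thread above.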
| - // b) Save the raw frames for the intro crossfade section, then copy the |
| - // same frames to |dest|. |
| - if (index_into_window_ < intro_crossfade_end) { |
| - const int frames_to_copy = |
| - std::min(requested_frames, intro_crossfade_end - index_into_window_); |
| - int offset = index_into_window_ - intro_crossfade_begin; |
| - int copied = audio_buffer_.ReadFrames( |
| - frames_to_copy, offset, crossfade_buffer_.get()); |
| - crossfade_buffer_->CopyPartialFramesTo(offset, copied, dest_offset, dest); |
| - index_into_window_ += copied; |
| - return copied; |
| - } |
| +bool AudioRendererAlgorithm::TargetIsWithinSearchRegion() const { |
| + const int search_block_index = GetSearchRegionIndex(); |
| + const int search_block_size = num_candidate_blocks_ + (ola_window_size_ - 1); |
| - // c) Output a raw frame into |dest| without advancing the |audio_buffer_| |
| - // cursor. |
| - int audio_buffer_offset = index_into_window_ - intro_crossfade_end; |
| - DCHECK_GE(audio_buffer_offset, 0); |
| - if (audio_buffer_.frames() <= audio_buffer_offset) |
| - return 0; |
| - int copied = |
| - audio_buffer_.PeekFrames(1, audio_buffer_offset, dest_offset, dest); |
| - DCHECK_EQ(1, copied); |
| - |
| - // d) Crossfade the next frame of |crossfade_buffer_| into |dest| if we've |
| - // reached the outtro crossfade section of the window. |
| - if (index_into_window_ >= outtro_crossfade_begin) { |
| - int offset_into_crossfade_buffer = |
| - index_into_window_ - outtro_crossfade_begin; |
| - CrossfadeFrame(dest, |
| - dest_offset, |
| - crossfade_buffer_.get(), |
| - offset_into_crossfade_buffer, |
| - offset_into_crossfade_buffer); |
| + if (target_block_index_ >= search_block_index && |
|
DaleCurtis
2013/08/13 21:11:04
Again, just return the value directly.
turaj
2013/08/16 22:13:56
Done.
|
| + target_block_index_ + ola_window_size_ <= |
| + search_block_index + search_block_size) { |
| + return true; |
| } |
| - |
| - index_into_window_ += copied; |
| - return copied; |
| + return false; |
| } |
| -void AudioRendererAlgorithm::CrossfadeFrame(AudioBus* intro, |
| - int intro_offset, |
| - AudioBus* outtro, |
| - int outtro_offset, |
| - int fade_offset) { |
| - float crossfade_ratio = |
| - static_cast<float>(fade_offset) / frames_in_crossfade_; |
| - for (int channel = 0; channel < channels_; ++channel) { |
| - outtro->channel(channel)[outtro_offset] = |
| - (1.0f - crossfade_ratio) * intro->channel(channel)[intro_offset] + |
| - (crossfade_ratio) * outtro->channel(channel)[outtro_offset]; |
| +bool AudioRendererAlgorithm::GetOptimalBlock() { |
| + int optimal_index = 0; |
| + if (TargetIsWithinSearchRegion()) { |
| + optimal_index = target_block_index_; |
| + // Get the optimal window. |
| + if (!PeekAudioWithZeroAppend(optimal_index, optimal_block_.get())) |
| + return false; |
| + } else { |
| + if (!PeekAudioWithZeroAppend(target_block_index_, target_block_.get())) |
| + return false; |
| + const int search_block_index = GetSearchRegionIndex(); |
| + |
| + if (!PeekAudioWithZeroAppend(search_block_index, search_block_.get())) |
| + return false; |
| + |
| + int last_optimal = target_block_index_ - ola_hop_size_ - |
| + search_block_index; |
| + internal::Interval exclude_iterval = std::make_pair(last_optimal - 80, |
|
DaleCurtis
2013/08/13 21:11:04
80? Extract to constant w/ documentation.
turaj
2013/08/16 22:13:56
Done.
|
| + last_optimal + 80); |
| + // |optimal_index| is in frames and it is relative to the beginning |
| + // of the |search_block_|. |
| + optimal_index = internal::OptimalIndex( |
| + search_block_.get(), target_block_.get(), exclude_iterval); |
| + |
| + // Translate |optimal_index| w.r.t. the beginning of |audio_buffer_|. |
| + optimal_index += search_block_index; |
| + |
| + // Get the optimal window. |
| + PeekAudioWithZeroAppend(optimal_index, optimal_block_.get()); |
| + |
| + // Make a transition from the target block to the optimal block if they |
| + // differ. The target block has the best continuation to the current output. |
| + // The optimal block is the block most similar to the target; however, it |
| + // might introduce some discontinuity when overlap-added. Therefore, we |
| + // combine them for a smoother transition. The transition window is twice |
| + // the length of the optimal block, which makes it act like a weighting |
| + // function where the target block has higher weight near the start (weight |
| + // of 1 at index 0) and lower weight toward the end. |
| + for (int k = 0; k < channels_; ++k) { |
| + float* ch_opt = optimal_block_->channel(k); |
| + float* ch_target = target_block_->channel(k); |
|
DaleCurtis
2013/08/13 21:11:04
const float* const ?
turaj
2013/08/16 22:13:56
Done.
|
| + for (int n = 0; n < ola_window_size_; ++n) { |
| + ch_opt[n] = ch_opt[n] * transition_window_[n] + ch_target[n] * |
| + transition_window_[ola_window_size_ + n]; |
| + } |
| + } |
| } |
| -} |
| - |
| -void AudioRendererAlgorithm::SetPlaybackRate(float new_rate) { |
| - DCHECK_GE(new_rate, 0); |
| - playback_rate_ = new_rate; |
| - muted_ = |
| - playback_rate_ < kMinPlaybackRate || playback_rate_ > kMaxPlaybackRate; |
| - |
| - ResetWindow(); |
| -} |
| - |
| -void AudioRendererAlgorithm::FlushBuffers() { |
| - ResetWindow(); |
| - |
| - // Clear the queue of decoded packets (releasing the buffers). |
| - audio_buffer_.Clear(); |
| -} |
| - |
| -base::TimeDelta AudioRendererAlgorithm::GetTime() { |
| - return audio_buffer_.current_time(); |
| -} |
| - |
| -void AudioRendererAlgorithm::EnqueueBuffer( |
| - const scoped_refptr<AudioBuffer>& buffer_in) { |
| - DCHECK(!buffer_in->end_of_stream()); |
| - audio_buffer_.Append(buffer_in); |
| -} |
| -bool AudioRendererAlgorithm::IsQueueFull() { |
| - return audio_buffer_.frames() >= capacity_; |
| + // Next target is one hop ahead of the current optimal. |
| + target_block_index_ = optimal_index + ola_hop_size_; |
| + return true; |
| } |
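DaleCurtis asks for the hard-coded 80 to be extracted into a documented constant and turaj replies "Done"; a sketch of what that could look like (the constant's name and comment are hypothetical, as the later patch set is not shown here):

  // At file scope, next to the other constants:
  // Half-width, in frames, of the interval around the previous optimal block's
  // position that OptimalIndex() is asked to exclude from the search.
  static const int kExcludeIntervalHalfWidth = 80;

  // In GetOptimalBlock(), replacing the literal 80s:
  internal::Interval exclude_iterval =
      std::make_pair(last_optimal - kExcludeIntervalHalfWidth,
                     last_optimal + kExcludeIntervalHalfWidth);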
| -void AudioRendererAlgorithm::IncreaseQueueCapacity() { |
| - capacity_ = std::min(2 * capacity_, kMaxBufferSizeInFrames); |
| +bool AudioRendererAlgorithm::PeekAudioWithZeroAppend( |
|
DaleCurtis
2013/08/13 21:11:04
Technically this is prepending data, not appending
turaj
2013/08/16 22:13:56
Done.
|
| + int read_offset_frames, AudioBus* dest) { |
| + int num_frames = dest->frames(); |
| + if (read_offset_frames + num_frames > audio_buffer_.frames()) |
|
DaleCurtis
2013/08/13 21:11:04
Should this be a CHECK() instead? Then this functi
turaj
2013/08/16 22:13:56
We can do that if you advise so. I was more along
DaleCurtis
2013/08/19 22:15:23
Chrome prefers to minimize potential paths through
turaj
2013/08/21 01:01:19
Sure.
On 2013/08/19 22:15:23, DaleCurtis wrote:
|
| + return false; |
| + |
| + int write_offset = 0; |
| + int num_frames_to_read = dest->frames(); |
|
DaleCurtis
2013/08/13 21:11:04
You have two num_frames variables which do the sam
turaj
2013/08/16 22:13:56
Done.
|
| + if (read_offset_frames < 0) { |
| + int num_zero_frames_appended = std::min(-read_offset_frames, |
| + num_frames_to_read); |
| + read_offset_frames = 0; |
| + num_frames_to_read -= num_zero_frames_appended; |
| + write_offset = num_zero_frames_appended; |
| + dest->ZeroFrames(num_zero_frames_appended); |
| + } |
| + audio_buffer_.PeekFrames(num_frames_to_read, read_offset_frames, |
|
DaleCurtis
2013/08/13 21:11:04
Should this return true if zero frames are peeked?
turaj
2013/08/16 22:13:56
According to the previous comments this function i
|
| + write_offset, dest); |
| + return true; |
| } |
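A worked example of the zero-prepend path, continuing the made-up 44.1 kHz numbers from the Initialize() sketch: on the very first WSOLA iteration |output_index_| is 0, so GetSearchRegionIndex() returns -1101 (minus |search_block_center_offset_|). Peeking the 2204-frame |search_block_| therefore zeroes its first 1101 frames and reads the remaining 1103 frames from the front of |audio_buffer_|, which matches the minimum CanPerformWsola() requires to be buffered at that point.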
| } // namespace media |