Index: media/filters/audio_renderer_algorithm.cc |
diff --git a/media/filters/audio_renderer_algorithm.cc b/media/filters/audio_renderer_algorithm.cc |
index 97f0811384159ff35868bb45f10eabdc7a94d86b..181cbea4425d63cd37217b52610a8be71883d906 100644 |
--- a/media/filters/audio_renderer_algorithm.cc |
+++ b/media/filters/audio_renderer_algorithm.cc |
@@ -12,41 +12,47 @@ |
#include "media/audio/audio_util.h" |
#include "media/base/audio_buffer.h" |
#include "media/base/audio_bus.h" |
+#include "media/filters/wsola_internals.h" |
namespace media { |
-// The starting size in frames for |audio_buffer_|. Previous usage maintained a |
-// queue of 16 AudioBuffers, each of 512 frames. This worked well, so we |
-// maintain this number of frames. |
-static const int kStartingBufferSizeInFrames = 16 * 512; |
- |
// The maximum size in frames for the |audio_buffer_|. Arbitrarily determined. |
// This number represents 3 seconds of 96kHz/16 bit 7.1 surround sound. |
static const int kMaxBufferSizeInFrames = 3 * 96000; |
-// Duration of audio segments used for crossfading (in seconds). |
-static const double kWindowDuration = 0.08; |
- |
-// Duration of crossfade between audio segments (in seconds). |
-static const double kCrossfadeDuration = 0.008; |
- |
// Max/min supported playback rates for fast/slow audio. Audio outside of these |
// ranges are muted. |
// Audio at these speeds would sound better under a frequency domain algorithm. |
static const float kMinPlaybackRate = 0.5f; |
static const float kMaxPlaybackRate = 4.0f; |
+// Overlap-and-add window size in milliseconds. |
+static const int kOlaWindowSizeMs = 20; |
+ |
+// Size of the search interval in milliseconds. The search interval is |
+// [-delta, delta] around |output_index_| * |playback_rate_|, so the length |
+// of the search interval is 2 * delta. |
+static const int kWsolaSearchIntervalMs = 30; |
+ |
+// The starting size in frames for |audio_buffer_|. Previous usage maintained a |
+// queue of 16 AudioBuffers, each of 512 frames. This worked well, so we |
+// maintain this number of frames. |
+static const int kStartingBufferSizeInFrames = 16 * 512; |
+ |
AudioRendererAlgorithm::AudioRendererAlgorithm() |
: channels_(0), |
samples_per_second_(0), |
playback_rate_(0), |
- frames_in_crossfade_(0), |
- index_into_window_(0), |
- crossfade_frame_number_(0), |
muted_(false), |
muted_partial_frame_(0), |
- window_size_(0), |
- capacity_(kStartingBufferSizeInFrames) { |
+ capacity_(kStartingBufferSizeInFrames), |
+ output_index_(0), |
+ search_block_center_offset_(0), |
+ num_candidate_frames_(0), |
+ target_block_index_(0), |
+ ola_window_size_(0), |
+ ola_hop_size_(0), |
+ num_complete_frames_(0) { |
} |
AudioRendererAlgorithm::~AudioRendererAlgorithm() {} |
@@ -59,9 +65,31 @@ void AudioRendererAlgorithm::Initialize(float initial_playback_rate, |
samples_per_second_ = params.sample_rate(); |
SetPlaybackRate(initial_playback_rate); |
- window_size_ = samples_per_second_ * kWindowDuration; |
- frames_in_crossfade_ = samples_per_second_ * kCrossfadeDuration; |
- crossfade_buffer_ = AudioBus::Create(channels_, frames_in_crossfade_); |
+ num_candidate_frames_ = |
+ (kWsolaSearchIntervalMs * samples_per_second_) / 1000 + 1; |
[DaleCurtis, 2013/08/06 18:04:55] Please include your comment about symmetry for the
[turaj, 2013/08/06 23:29:27] I gave up "+ 1" it is not at all necessary in this
+ |
+  // Make sure the window size is an even number. |
+ ola_window_size_ = (kOlaWindowSizeMs * samples_per_second_ / 1000 / 2) * 2; |
[DaleCurtis, 2013/08/06 18:04:55] If you need both this and num_candidate_frames_ to
[turaj, 2013/08/06 23:29:27] We only have one member which has to be even, but
+ |
+ ola_hop_size_ = ola_window_size_ / 2; |
+ |
+ search_block_center_offset_ = (num_candidate_frames_ - 1) / 2 + ( |
[DaleCurtis, 2013/08/06 18:04:55] Again, comments for why you have the -1s.
[turaj, 2013/08/06 23:29:27] Done.
+ ola_window_size_ / 2 - 1); |
+ |
+ ola_window_.reset(new float[ola_window_size_]); |
[DaleCurtis, 2013/08/06 18:04:55] Do any of these structures need to be zero initial
[turaj, 2013/08/06 23:29:27] I didn't comprehend what you mean by "zero initial
+ internal::GetSymmetricHanningWindow(ola_window_size_, ola_window_.get()); |
+ |
+ transition_window_.reset(new float[ola_window_size_ * 2]); |
+ internal::GetSymmetricHanningWindow(2 * ola_window_size_, |
+ transition_window_.get()); |
+ |
+ wsola_output_ = AudioBus::Create(channels_, ola_window_size_ + ola_hop_size_); |
+ |
+ // Auxiliary containers. |
+ optimal_block_ = AudioBus::Create(channels_, ola_window_size_); |
+ search_block_ = AudioBus::Create( |
+ channels_, num_candidate_frames_ + (ola_window_size_ - 1)); |
+ target_block_ = AudioBus::Create(channels_, ola_window_size_); |
} |
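For concreteness, here is a minimal standalone sketch (not part of this CL) that evaluates the quantities Initialize() derives above for an assumed 44.1 kHz stream; the later sketches in this review reuse these numbers.

    // Worked example of the Initialize() arithmetic above at 44.1 kHz (assumed).
    #include <cstdio>

    int main() {
      const int sample_rate = 44100;
      const int kOlaWindowSizeMs = 20;
      const int kWsolaSearchIntervalMs = 30;

      // Number of candidate window positions inside the search block.
      const int num_candidate_frames =
          (kWsolaSearchIntervalMs * sample_rate) / 1000 + 1;            // 1324
      // Window size, forced to be even so it splits into two equal hops.
      const int ola_window_size =
          (kOlaWindowSizeMs * sample_rate / 1000 / 2) * 2;              // 882
      const int ola_hop_size = ola_window_size / 2;                     // 441
      // Offset from the start of the search block to its center frame.
      const int search_block_center_offset =
          (num_candidate_frames - 1) / 2 + (ola_window_size / 2 - 1);   // 1101
      // Search block size: one full window can start at every candidate frame.
      const int search_block_size =
          num_candidate_frames + (ola_window_size - 1);                 // 2205

      printf("%d %d %d %d %d\n", num_candidate_frames, ola_window_size,
             ola_hop_size, search_block_center_offset, search_block_size);
      return 0;
    }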
int AudioRendererAlgorithm::FillBuffer(AudioBus* dest, int requested_frames) { |
@@ -93,12 +121,12 @@ int AudioRendererAlgorithm::FillBuffer(AudioBus* dest, int requested_frames) { |
return frames_to_render; |
} |
- int slower_step = ceil(window_size_ * playback_rate_); |
- int faster_step = ceil(window_size_ / playback_rate_); |
+ int slower_step = ceil(ola_window_size_ * playback_rate_); |
+ int faster_step = ceil(ola_window_size_ / playback_rate_); |
// Optimize the most common |playback_rate_| ~= 1 case to use a single copy |
// instead of copying frame by frame. |
- if (window_size_ <= faster_step && slower_step >= window_size_) { |
+ if (ola_window_size_ <= faster_step && slower_step >= ola_window_size_) { |
const int frames_to_copy = |
std::min(audio_buffer_.frames(), requested_frames); |
const int frames_read = audio_buffer_.ReadFrames(frames_to_copy, 0, dest); |
@@ -106,277 +134,231 @@ int AudioRendererAlgorithm::FillBuffer(AudioBus* dest, int requested_frames) { |
return frames_read; |
} |
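As a side note, here is a standalone sketch (not part of this CL) of which playback rates actually take this single-copy path, assuming the 882-frame window (20 ms at 44.1 kHz) from the sketch above; only rates within roughly 0.1% of 1.0 qualify.

    #include <cmath>
    #include <cstdio>

    // Mirrors the slower_step / faster_step check above for a given rate.
    bool TakesSingleCopyPath(float rate, int window) {
      const int slower_step = static_cast<int>(std::ceil(window * rate));
      const int faster_step = static_cast<int>(std::ceil(window / rate));
      return window <= faster_step && slower_step >= window;
    }

    int main() {
      const int window = 882;  // ola_window_size_ at 44.1 kHz (assumed).
      printf("%d\n", TakesSingleCopyPath(1.0f, window));    // 1
      printf("%d\n", TakesSingleCopyPath(1.001f, window));  // 1
      printf("%d\n", TakesSingleCopyPath(1.002f, window));  // 0
      printf("%d\n", TakesSingleCopyPath(0.99f, window));   // 0
      return 0;
    }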
- int total_frames_rendered = 0; |
- while (total_frames_rendered < requested_frames) { |
- if (index_into_window_ >= window_size_) |
- ResetWindow(); |
- |
- int rendered_frames = 0; |
- if (window_size_ > faster_step) { |
- rendered_frames = |
- OutputFasterPlayback(dest, |
- total_frames_rendered, |
- requested_frames - total_frames_rendered, |
- window_size_, |
- faster_step); |
- } else if (slower_step < window_size_) { |
- rendered_frames = |
- OutputSlowerPlayback(dest, |
- total_frames_rendered, |
- requested_frames - total_frames_rendered, |
- slower_step, |
- window_size_); |
- } else { |
- NOTREACHED(); |
- } |
+ int total_frames_rendered = WsolaOutput(requested_frames, dest); |
[DaleCurtis, 2013/08/06 18:04:55] I commented in the header about function naming, b
[turaj, 2013/08/06 23:29:27] True. I thought that I keep a single method as API
+ return total_frames_rendered; |
+} |
- if (rendered_frames == 0) |
- break; |
+void AudioRendererAlgorithm::SetPlaybackRate(float new_rate) { |
+ DCHECK_GE(new_rate, 0); |
+ // Round it to two decimal digits. |
+ playback_rate_ = floor(new_rate * 100.f + 0.5f) / 100; |
+ muted_ = |
+ playback_rate_ < kMinPlaybackRate || playback_rate_ > kMaxPlaybackRate; |
+} |
- total_frames_rendered += rendered_frames; |
- } |
- return total_frames_rendered; |
+void AudioRendererAlgorithm::FlushBuffers() { |
+ // Clear the queue of decoded packets (releasing the buffers). |
+ audio_buffer_.Clear(); |
+ output_index_ = 0; |
+ target_block_index_ = 0; |
+ wsola_output_->Zero(); |
+ num_complete_frames_ = 0; |
} |
-void AudioRendererAlgorithm::ResetWindow() { |
- DCHECK_LE(index_into_window_, window_size_); |
- index_into_window_ = 0; |
- crossfade_frame_number_ = 0; |
+base::TimeDelta AudioRendererAlgorithm::GetTime() { |
+ return audio_buffer_.current_time(); |
} |
-int AudioRendererAlgorithm::OutputFasterPlayback(AudioBus* dest, |
- int dest_offset, |
- int requested_frames, |
- int input_step, |
- int output_step) { |
- // Ensure we don't run into OOB read/write situation. |
- CHECK_GT(input_step, output_step); |
- DCHECK_LT(index_into_window_, window_size_); |
- DCHECK_GT(playback_rate_, 1.0); |
- DCHECK(!muted_); |
- |
- if (audio_buffer_.frames() < 1) |
- return 0; |
+void AudioRendererAlgorithm::EnqueueBuffer( |
+ const scoped_refptr<AudioBuffer>& buffer_in) { |
+ DCHECK(!buffer_in->end_of_stream()); |
+ audio_buffer_.Append(buffer_in); |
+} |
- // The audio data is output in a series of windows. For sped-up playback, |
- // the window is comprised of the following phases: |
- // |
- // a) Output raw data. |
- // b) Save bytes for crossfade in |crossfade_buffer_|. |
- // c) Drop data. |
- // d) Output crossfaded audio leading up to the next window. |
- // |
- // The duration of each phase is computed below based on the |window_size_| |
- // and |playback_rate_|. |
- DCHECK_LE(frames_in_crossfade_, output_step); |
- |
- // This is the index of the end of phase a, beginning of phase b. |
- int outtro_crossfade_begin = output_step - frames_in_crossfade_; |
- |
- // This is the index of the end of phase b, beginning of phase c. |
- int outtro_crossfade_end = output_step; |
- |
- // This is the index of the end of phase c, beginning of phase d. |
- // This phase continues until |index_into_window_| reaches |window_size_|, at |
- // which point the window restarts. |
- int intro_crossfade_begin = input_step - frames_in_crossfade_; |
- |
- // a) Output raw frames if we haven't reached the crossfade section. |
- if (index_into_window_ < outtro_crossfade_begin) { |
- // Read as many frames as we can and return the count. If it's not enough, |
- // we will get called again. |
- const int frames_to_copy = |
- std::min(requested_frames, outtro_crossfade_begin - index_into_window_); |
- int copied = audio_buffer_.ReadFrames(frames_to_copy, dest_offset, dest); |
- index_into_window_ += copied; |
- return copied; |
- } |
+bool AudioRendererAlgorithm::IsQueueFull() { |
+ return audio_buffer_.frames() >= capacity_; |
+} |
+ |
+void AudioRendererAlgorithm::IncreaseQueueCapacity() { |
+ capacity_ = std::min(2 * capacity_, kMaxBufferSizeInFrames); |
+} |
- // b) Save outtro crossfade frames into intermediate buffer, but do not output |
- // anything to |dest|. |
- if (index_into_window_ < outtro_crossfade_end) { |
- // This phase only applies if there are bytes to crossfade. |
- DCHECK_GT(frames_in_crossfade_, 0); |
- int crossfade_start = index_into_window_ - outtro_crossfade_begin; |
- int crossfade_count = outtro_crossfade_end - index_into_window_; |
- int copied = audio_buffer_.ReadFrames( |
- crossfade_count, crossfade_start, crossfade_buffer_.get()); |
- index_into_window_ += copied; |
- |
- // Did we get all the frames we need? If not, return and let subsequent |
- // calls try to get the rest. |
- if (copied != crossfade_count) |
- return 0; |
+bool AudioRendererAlgorithm::CanPerformWsola() const { |
+ const int search_block_size = num_candidate_frames_ + (ola_window_size_ - 1); |
+ const int frames = audio_buffer_.frames(); |
+ if (target_block_index_ + ola_window_size_ <= frames && |
+ GetSearchRegionIndex() + search_block_size <= frames) { |
+ return true; |
} |
+ return false; |
+} |
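A standalone sketch (not part of this CL) of how much buffered audio this check needs before the first WSOLA iteration, reusing the 44.1 kHz figures from the Initialize() sketch and the start-up state where |target_block_index_| and |output_index_| are both zero (so GetSearchRegionIndex() returns -1101):

    #include <algorithm>
    #include <cstdio>

    int main() {
      const int ola_window_size = 882;        // From the 44.1 kHz sketch.
      const int num_candidate_frames = 1324;
      const int search_block_size =
          num_candidate_frames + (ola_window_size - 1);                // 2205
      const int target_block_index = 0;                                // Start-up.
      const int search_region_index = -1101;                           // Start-up.

      // Both the target block and the search block must fit in the buffer.
      const int frames_needed =
          std::max(target_block_index + ola_window_size,
                   search_region_index + search_block_size);
      printf("%d\n", frames_needed);  // 1104 frames, about 25 ms of audio.
      return 0;
    }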
- // c) Drop frames until we reach the intro crossfade section. |
- if (index_into_window_ < intro_crossfade_begin) { |
- // Check if there is enough data to skip all the frames needed. If not, |
- // return 0 and let subsequent calls try to skip it all. |
- int seek_frames = intro_crossfade_begin - index_into_window_; |
- if (audio_buffer_.frames() < seek_frames) |
- return 0; |
- audio_buffer_.SeekFrames(seek_frames); |
+int AudioRendererAlgorithm::WsolaOutput(int requested_frames, AudioBus* dest) { |
+ DCHECK_EQ(channels_, dest->channels()); |
- // We've dropped all the frames that need to be dropped. |
- index_into_window_ += seek_frames; |
+ // First read the frames which are ready. |
+ int rendered_frames = ReadWsolaOutput(requested_frames, 0, dest); |
+ while (rendered_frames < requested_frames && CanPerformWsola()) { |
+ rendered_frames += Wsola(requested_frames - rendered_frames, |
+ rendered_frames, dest); |
} |
- |
- // d) Crossfade and output a frame, as long as we have data. |
- if (audio_buffer_.frames() < 1) |
- return 0; |
- DCHECK_GT(frames_in_crossfade_, 0); |
- DCHECK_LT(index_into_window_, window_size_); |
- |
- int offset_into_buffer = index_into_window_ - intro_crossfade_begin; |
- int copied = audio_buffer_.ReadFrames(1, dest_offset, dest); |
- DCHECK_EQ(copied, 1); |
- CrossfadeFrame(crossfade_buffer_.get(), |
- offset_into_buffer, |
- dest, |
- dest_offset, |
- offset_into_buffer); |
- index_into_window_ += copied; |
- return copied; |
+ return rendered_frames; |
} |
-int AudioRendererAlgorithm::OutputSlowerPlayback(AudioBus* dest, |
- int dest_offset, |
- int requested_frames, |
- int input_step, |
- int output_step) { |
- // Ensure we don't run into OOB read/write situation. |
- CHECK_LT(input_step, output_step); |
- DCHECK_LT(index_into_window_, window_size_); |
- DCHECK_LT(playback_rate_, 1.0); |
- DCHECK_NE(playback_rate_, 0); |
- DCHECK(!muted_); |
- |
- if (audio_buffer_.frames() < 1) |
- return 0; |
+int AudioRendererAlgorithm::Wsola( |
+ int requested_frames, int dest_offset, AudioBus* dest) { |
+ if (!GetOptimalBlock()) |
+    return 0;  // We cannot continue because |optimal_block_| was not found; |
+               // there was not enough data. |
+ |
+ // Overlap-and-add. |
+ for (int k = 0; k < channels_; ++k) { |
+ float* ch_opt_frame = optimal_block_->channel(k); |
+ float* ch_output = wsola_output_->channel(k) + num_complete_frames_; |
+ for (int n = 0; n < ola_hop_size_; ++n) { |
+ ch_output[n] = ch_output[n] * ola_window_[ola_hop_size_ + n] + |
+ ch_opt_frame[n] * ola_window_[n]; |
+ } |
- // The audio data is output in a series of windows. For slowed down playback, |
- // the window is comprised of the following phases: |
- // |
- // a) Output raw data. |
- // b) Output and save bytes for crossfade in |crossfade_buffer_|. |
- // c) Output* raw data. |
- // d) Output* crossfaded audio leading up to the next window. |
- // |
- // * Phases c) and d) do not progress |audio_buffer_|'s cursor so that the |
- // |audio_buffer_|'s cursor is in the correct place for the next window. |
- // |
- // The duration of each phase is computed below based on the |window_size_| |
- // and |playback_rate_|. |
- DCHECK_LE(frames_in_crossfade_, input_step); |
- |
- // This is the index of the end of phase a, beginning of phase b. |
- int intro_crossfade_begin = input_step - frames_in_crossfade_; |
- |
- // This is the index of the end of phase b, beginning of phase c. |
- int intro_crossfade_end = input_step; |
- |
- // This is the index of the end of phase c, beginning of phase d. |
- // This phase continues until |index_into_window_| reaches |window_size_|, at |
- // which point the window restarts. |
- int outtro_crossfade_begin = output_step - frames_in_crossfade_; |
- |
- // a) Output raw frames. |
- if (index_into_window_ < intro_crossfade_begin) { |
- // Read as many frames as we can and return the count. If it's not enough, |
- // we will get called again. |
- const int frames_to_copy = |
- std::min(requested_frames, intro_crossfade_begin - index_into_window_); |
- int copied = audio_buffer_.ReadFrames(frames_to_copy, dest_offset, dest); |
- index_into_window_ += copied; |
- return copied; |
+ // Copy the second half to the output. |
+ memcpy(&ch_output[ola_hop_size_], &ch_opt_frame[ola_hop_size_], |
+ sizeof(*ch_opt_frame) * ola_hop_size_); |
} |
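The loop above relies on the analysis window summing to one across a half-window hop, so audio rendered at 1x keeps its level after overlap-and-add. Below is a standalone sketch (not part of this CL) that checks this constant-overlap-add property; it assumes GetSymmetricHanningWindow() produces the half-sample-offset Hann window written out here, which is an assumption since wsola_internals.cc is not shown in this diff.

    #include <algorithm>
    #include <cmath>
    #include <cstdio>
    #include <vector>

    int main() {
      const int N = 882;                // ola_window_size_ at 44.1 kHz (assumed).
      const int hop = N / 2;            // ola_hop_size_.
      const double kPi = 3.14159265358979323846;
      std::vector<double> w(N);
      // Assumed window formula; the real one lives in wsola_internals.cc.
      for (int n = 0; n < N; ++n)
        w[n] = 0.5 * (1.0 - std::cos(2.0 * kPi * (n + 0.5) / N));

      double max_err = 0.0;
      for (int n = 0; n < hop; ++n)
        max_err = std::max(max_err, std::fabs(w[n] + w[n + hop] - 1.0));
      // For this window the two overlapping halves sum to exactly 1, so the
      // printed error is only floating-point noise (on the order of 1e-16).
      printf("max |w[n] + w[n + hop] - 1| = %g\n", max_err);
      return 0;
    }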
- // b) Save the raw frames for the intro crossfade section, then copy the |
- // same frames to |dest|. |
- if (index_into_window_ < intro_crossfade_end) { |
- const int frames_to_copy = |
- std::min(requested_frames, intro_crossfade_end - index_into_window_); |
- int offset = index_into_window_ - intro_crossfade_begin; |
- int copied = audio_buffer_.ReadFrames( |
- frames_to_copy, offset, crossfade_buffer_.get()); |
- crossfade_buffer_->CopyPartialFramesTo(offset, copied, dest_offset, dest); |
- index_into_window_ += copied; |
- return copied; |
- } |
+ num_complete_frames_ += ola_hop_size_; |
+ output_index_ += ola_hop_size_; |
- // c) Output a raw frame into |dest| without advancing the |audio_buffer_| |
- // cursor. |
- int audio_buffer_offset = index_into_window_ - intro_crossfade_end; |
- DCHECK_GE(audio_buffer_offset, 0); |
- if (audio_buffer_.frames() <= audio_buffer_offset) |
- return 0; |
- int copied = |
- audio_buffer_.PeekFrames(1, audio_buffer_offset, dest_offset, dest); |
- DCHECK_EQ(1, copied); |
- |
- // d) Crossfade the next frame of |crossfade_buffer_| into |dest| if we've |
- // reached the outtro crossfade section of the window. |
- if (index_into_window_ >= outtro_crossfade_begin) { |
- int offset_into_crossfade_buffer = |
- index_into_window_ - outtro_crossfade_begin; |
- CrossfadeFrame(dest, |
- dest_offset, |
- crossfade_buffer_.get(), |
- offset_into_crossfade_buffer, |
- offset_into_crossfade_buffer); |
- } |
+ RemoveOldInputFrames(); |
+ return ReadWsolaOutput(requested_frames, dest_offset, dest); |
+} |
- index_into_window_ += copied; |
- return copied; |
+int AudioRendererAlgorithm::GetSearchRegionIndex() const { |
+ // Center of the search region, in frames. |
+ const int search_block_center_index = static_cast<int>(floor( |
+ output_index_ * playback_rate_ + 0.5)); |
+ |
+ // Index of the beginning of the search region, in frames. |
+ return search_block_center_index - search_block_center_offset_; |
} |
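A standalone sketch (not part of this CL) of where the search region starts for two example states, reusing the value |search_block_center_offset_| == 1101 from the 44.1 kHz Initialize() sketch; note that the start-up result is negative, which PeekAudioWithZeroAppend() later pads with silence.

    #include <cmath>
    #include <cstdio>

    // Mirrors GetSearchRegionIndex() above.
    int GetSearchRegionIndex(int output_index, float playback_rate,
                             int search_block_center_offset) {
      const int center =
          static_cast<int>(std::floor(output_index * playback_rate + 0.5));
      return center - search_block_center_offset;
    }

    int main() {
      // At start-up the region begins before the first buffered frame.
      printf("%d\n", GetSearchRegionIndex(0, 1.0f, 1101));      // -1101
      // After one second of output at 2x, the region is centered near 2 s in.
      printf("%d\n", GetSearchRegionIndex(44100, 2.0f, 1101));  // 87099
      return 0;
    }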
-void AudioRendererAlgorithm::CrossfadeFrame(AudioBus* intro, |
- int intro_offset, |
- AudioBus* outtro, |
- int outtro_offset, |
- int fade_offset) { |
- float crossfade_ratio = |
- static_cast<float>(fade_offset) / frames_in_crossfade_; |
- for (int channel = 0; channel < channels_; ++channel) { |
- outtro->channel(channel)[outtro_offset] = |
- (1.0f - crossfade_ratio) * intro->channel(channel)[intro_offset] + |
- (crossfade_ratio) * outtro->channel(channel)[outtro_offset]; |
- } |
+void AudioRendererAlgorithm::RemoveOldInputFrames() { |
+ const int earliest_used_index = std::min(target_block_index_, |
+ GetSearchRegionIndex()); |
+ |
+ if (earliest_used_index < 0) |
+ return; // Nothing to remove |
+ |
+  // Assuming |playback_rate_| * 100 == floor(|playback_rate_| * 100), that |
+  // is, |playback_rate_| is represented by only two decimal digits, we |
+  // eliminate blocks of 100 * |playback_rate_| input frames at a time. |
+ const int kOutputFramesPerBlock = 100; |
+ const int input_frames_per_block = |
+ static_cast<int>(floor(playback_rate_ * kOutputFramesPerBlock + 0.5f)); |
+ const int blocks_to_remove = earliest_used_index / input_frames_per_block; |
+ const int input_frames_to_remove = input_frames_per_block * blocks_to_remove; |
+ |
+ // Remove frames from input and adjust indices accordingly. |
+ audio_buffer_.SeekFrames(input_frames_to_remove); |
+ target_block_index_ -= input_frames_to_remove; |
+ |
+ // Adjust output index. |
+ output_index_ -= kOutputFramesPerBlock * blocks_to_remove; |
+ DCHECK_GE(output_index_, 0); |
} |
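A standalone sketch (not part of this CL) of the block arithmetic above, using a hypothetical |playback_rate_| of 1.25 and a hypothetical earliest used index of 700 frames:

    #include <cmath>
    #include <cstdio>

    int main() {
      const float playback_rate = 1.25f;       // Two decimal digits, as
                                               // enforced by SetPlaybackRate().
      const int kOutputFramesPerBlock = 100;
      const int earliest_used_index = 700;     // Hypothetical value.

      const int input_frames_per_block = static_cast<int>(
          std::floor(playback_rate * kOutputFramesPerBlock + 0.5f));       // 125
      const int blocks_to_remove =
          earliest_used_index / input_frames_per_block;                    // 5
      const int input_frames_to_remove =
          input_frames_per_block * blocks_to_remove;                       // 625
      const int output_frames_to_remove =
          kOutputFramesPerBlock * blocks_to_remove;                        // 500

      // 625 input frames / 500 output frames == 1.25, so the input and output
      // cursors stay aligned and output_index_ never goes negative.
      printf("%d %d %d\n", blocks_to_remove, input_frames_to_remove,
             output_frames_to_remove);
      return 0;
    }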
-void AudioRendererAlgorithm::SetPlaybackRate(float new_rate) { |
- DCHECK_GE(new_rate, 0); |
- playback_rate_ = new_rate; |
- muted_ = |
- playback_rate_ < kMinPlaybackRate || playback_rate_ > kMaxPlaybackRate; |
+int AudioRendererAlgorithm::ReadWsolaOutput( |
+ int requested_frames, int dest_offset, AudioBus* dest) { |
+ int rendered_frames = std::min(num_complete_frames_, requested_frames); |
- ResetWindow(); |
-} |
+ if (rendered_frames == 0) |
+    return 0;  // There is nothing to read from |wsola_output_|. |
-void AudioRendererAlgorithm::FlushBuffers() { |
- ResetWindow(); |
+ wsola_output_->CopyPartialFramesTo(0, rendered_frames, dest_offset, dest); |
- // Clear the queue of decoded packets (releasing the buffers). |
- audio_buffer_.Clear(); |
+ // Remove the frames which are read. |
+ int frames_to_move = wsola_output_->frames() - rendered_frames; |
+ for (int k = 0; k < channels_; ++k) { |
+ float* ch = wsola_output_->channel(k); |
+ memmove(ch, &ch[rendered_frames], sizeof(*ch) * frames_to_move); |
+ } |
+ num_complete_frames_ -= rendered_frames; |
+ return rendered_frames; |
} |
-base::TimeDelta AudioRendererAlgorithm::GetTime() { |
- return audio_buffer_.current_time(); |
-} |
+bool AudioRendererAlgorithm::TargetIsWithinSearchRegion() const { |
+ const int search_block_index = GetSearchRegionIndex(); |
+ const int search_block_size = num_candidate_frames_ + (ola_window_size_ - 1); |
-void AudioRendererAlgorithm::EnqueueBuffer( |
- const scoped_refptr<AudioBuffer>& buffer_in) { |
- DCHECK(!buffer_in->end_of_stream()); |
- audio_buffer_.Append(buffer_in); |
+ if (target_block_index_ >= search_block_index && |
+ target_block_index_ + ola_window_size_ <= |
+ search_block_index + search_block_size) { |
+ return true; |
+ } |
+ return false; |
} |
-bool AudioRendererAlgorithm::IsQueueFull() { |
- return audio_buffer_.frames() >= capacity_; |
+bool AudioRendererAlgorithm::GetOptimalBlock() { |
+ int optimal_index = 0; |
+ if (TargetIsWithinSearchRegion()) { |
+ optimal_index = target_block_index_; |
+ // Get the optimal window. |
+ if (!PeekAudioWithZeroAppend(optimal_index, optimal_block_.get())) |
+ return false; |
+ } else { |
+ if (!PeekAudioWithZeroAppend(target_block_index_, target_block_.get())) |
+ return false; |
+ const int search_block_index = GetSearchRegionIndex(); |
+ |
+ if (!PeekAudioWithZeroAppend(search_block_index, search_block_.get())) |
+ return false; |
+ |
+ int last_optimal = target_block_index_ - ola_hop_size_ - |
+ search_block_index; |
+    internal::Interval exclude_interval = std::make_pair(last_optimal - 80, |
+                                                          last_optimal + 80); |
+ // |optimal_index| is in frames and it is relative to the beginning |
+ // of the |search_block_|. |
+ optimal_index = internal::OptimalIndex( |
+        search_block_.get(), target_block_.get(), exclude_interval); |
+ |
+    // Translate |optimal_index| w.r.t. the beginning of |audio_buffer_|. |
+ optimal_index += search_block_index; |
+ |
+ // Get the optimal window. |
+ PeekAudioWithZeroAppend(optimal_index, optimal_block_.get()); |
+ |
+ // Make a transition from target block to the optimal block if different. |
+ // Target block has the best continuation to the current output. |
+    // Optimal block is the most similar block to the target; however, it |
+    // might introduce some discontinuity when overlap-added. Therefore, we |
+    // combine them for a smoother transition. The length of the transition |
+    // window is twice that of the optimal block, which makes it act like a |
[marpan, 2013/08/06 17:14:10] "that of" the optimal-block
[turaj, 2013/08/06 23:29:27] Done.
+    // weighting function where the target block has a higher weight close to |
+    // zero (weight of 1 at index 0) and a lower weight close to the end. |
+ for (int k = 0; k < channels_; ++k) { |
+ float* ch_opt = optimal_block_->channel(k); |
+ float* ch_target = target_block_->channel(k); |
+ for (int n = 0; n < ola_window_size_; ++n) { |
+ ch_opt[n] = ch_opt[n] * transition_window_[n] + ch_target[n] * |
+ transition_window_[ola_window_size_ + n]; |
+ } |
+ } |
+ } |
+ |
+ // Next target is one hop ahead of the current optimal. |
+ target_block_index_ = optimal_index + ola_hop_size_; |
+ return true; |
} |
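A standalone sketch (not part of this CL) of the transition weighting in the loop above. It reuses the half-sample-offset Hann assumption from the overlap-and-add sketch and shows that the two weights always sum to one, with the target block dominating at index 0 and the optimal block dominating at the end of the window.

    #include <cmath>
    #include <cstdio>
    #include <vector>

    int main() {
      const int N = 882;                        // ola_window_size_ (assumed).
      const double kPi = 3.14159265358979323846;
      std::vector<double> w(2 * N);             // Stand-in for transition_window_.
      for (int n = 0; n < 2 * N; ++n)
        w[n] = 0.5 * (1.0 - std::cos(2.0 * kPi * (n + 0.5) / (2 * N)));

      // In the loop above, the optimal block is weighted by w[n] and the target
      // block by w[N + n]; the two weights form a complementary cross-fade.
      printf("n = 0:     opt %.4f  target %.4f  sum %.4f\n",
             w[0], w[N], w[0] + w[N]);
      printf("n = N - 1: opt %.4f  target %.4f  sum %.4f\n",
             w[N - 1], w[2 * N - 1], w[N - 1] + w[2 * N - 1]);
      return 0;
    }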
-void AudioRendererAlgorithm::IncreaseQueueCapacity() { |
- capacity_ = std::min(2 * capacity_, kMaxBufferSizeInFrames); |
+bool AudioRendererAlgorithm::PeekAudioWithZeroAppend( |
+ int read_offset_frames, AudioBus* dest) { |
+ int num_frames = dest->frames(); |
+ if (read_offset_frames + num_frames > audio_buffer_.frames()) |
+ return false; |
+ |
+ int write_offset = 0; |
+ int num_frames_to_read = dest->frames(); |
+ if (read_offset_frames < 0) { |
+ int num_zero_frames_appended = std::min(-read_offset_frames, |
+ num_frames_to_read); |
+ read_offset_frames = 0; |
+ num_frames_to_read -= num_zero_frames_appended; |
+ write_offset = num_zero_frames_appended; |
+ dest->ZeroFrames(num_zero_frames_appended); |
+ } |
+ audio_buffer_.PeekFrames(num_frames_to_read, read_offset_frames, |
+ write_offset, dest); |
+ return true; |
} |
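Finally, a standalone sketch (not part of this CL) of the zero-prepend path above, with a plain std::vector standing in for |audio_buffer_| and a single channel; the helper name is hypothetical. This is the path that handles the negative start-up search index mentioned earlier.

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    // Hypothetical single-channel stand-in for PeekAudioWithZeroAppend().
    bool PeekWithZeroPrepend(const std::vector<float>& buffer,
                             int read_offset_frames,
                             std::vector<float>* dest) {
      const int num_frames = static_cast<int>(dest->size());
      if (read_offset_frames + num_frames > static_cast<int>(buffer.size()))
        return false;  // Not enough buffered audio yet.

      int write_offset = 0;
      int frames_to_read = num_frames;
      if (read_offset_frames < 0) {
        // The requested region starts before the first buffered frame; pad the
        // missing leading part with silence.
        const int zeros = std::min(-read_offset_frames, frames_to_read);
        std::fill(dest->begin(), dest->begin() + zeros, 0.0f);
        read_offset_frames = 0;
        frames_to_read -= zeros;
        write_offset = zeros;
      }
      std::copy(buffer.begin() + read_offset_frames,
                buffer.begin() + read_offset_frames + frames_to_read,
                dest->begin() + write_offset);
      return true;
    }

    int main() {
      std::vector<float> buffer = {1, 2, 3, 4, 5};
      std::vector<float> dest(4);
      // Read 4 frames starting 2 frames before the buffer: expect {0, 0, 1, 2}.
      if (PeekWithZeroPrepend(buffer, -2, &dest))
        printf("%g %g %g %g\n", dest[0], dest[1], dest[2], dest[3]);
      return 0;
    }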
} // namespace media |