Chromium Code Reviews

Unified Diff: media/filters/audio_renderer_algorithm.cc

Issue 19111004: Upgrade AudioRendererAlgorithm to use WSOLA, (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Created 7 years, 5 months ago
Index: media/filters/audio_renderer_algorithm.cc
diff --git a/media/filters/audio_renderer_algorithm.cc b/media/filters/audio_renderer_algorithm.cc
index 97f0811384159ff35868bb45f10eabdc7a94d86b..18342e2701cd27b973934c0b3401c6aad009fb6b 100644
--- a/media/filters/audio_renderer_algorithm.cc
+++ b/media/filters/audio_renderer_algorithm.cc
@@ -12,41 +12,47 @@
#include "media/audio/audio_util.h"
#include "media/base/audio_buffer.h"
#include "media/base/audio_bus.h"
+#include "media/filters/audio_renderer_algorithm_util.h"
namespace media {
-// The starting size in frames for |audio_buffer_|. Previous usage maintained a
-// queue of 16 AudioBuffers, each of 512 frames. This worked well, so we
-// maintain this number of frames.
-static const int kStartingBufferSizeInFrames = 16 * 512;
-
// The maximum size in frames for the |audio_buffer_|. Arbitrarily determined.
// This number represents 3 seconds of 96kHz/16 bit 7.1 surround sound.
static const int kMaxBufferSizeInFrames = 3 * 96000;
-// Duration of audio segments used for crossfading (in seconds).
-static const double kWindowDuration = 0.08;
-
-// Duration of crossfade between audio segments (in seconds).
-static const double kCrossfadeDuration = 0.008;
-
// Max/min supported playback rates for fast/slow audio. Audio outside of these
// ranges are muted.
// Audio at these speeds would sound better under a frequency domain algorithm.
static const float kMinPlaybackRate = 0.5f;
static const float kMaxPlaybackRate = 4.0f;
+// Overlap-and-add window size in milliseconds.
+static const int kOlaWindowSizeMs = 25;
+
+// Size of the search interval in milliseconds. The search interval is
+// [-delta, delta] around |output_index_| * |playback_rate_|, so its total
+// length is 2 * delta.
+static const int kWsolaSearchIntervalMs = 30;
+
+// The starting size in frames for |audio_buffer_|. Previous usage maintained a
+// queue of 16 AudioBuffers, each of 512 frames. This worked well, so we
+// maintain this number of frames.
+static const int kStartingBufferSizeInFrames = 16 * 512;
ajm 2013/07/23 18:03:28 I assume this is the "frames as in samples" usage.
turaj 2013/07/29 22:09:57 The notion of frame, where frame _N_ is the set of
+
AudioRendererAlgorithm::AudioRendererAlgorithm()
: channels_(0),
samples_per_second_(0),
playback_rate_(0),
- frames_in_crossfade_(0),
- index_into_window_(0),
- crossfade_frame_number_(0),
muted_(false),
muted_partial_frame_(0),
- window_size_(0),
- capacity_(kStartingBufferSizeInFrames) {
+ capacity_(kStartingBufferSizeInFrames),
+ output_index_(0),
+ search_region_center_offset_(0),
+ num_candid_frames_(0),
+ target_window_index_(0),
+ ola_window_size_(0),
+ ola_hop_size_(0),
+ num_complete_frames_(0) {
}
AudioRendererAlgorithm::~AudioRendererAlgorithm() {}
@@ -59,9 +65,25 @@ void AudioRendererAlgorithm::Initialize(float initial_playback_rate,
samples_per_second_ = params.sample_rate();
SetPlaybackRate(initial_playback_rate);
- window_size_ = samples_per_second_ * kWindowDuration;
- frames_in_crossfade_ = samples_per_second_ * kCrossfadeDuration;
- crossfade_buffer_ = AudioBus::Create(channels_, frames_in_crossfade_);
+ num_candid_frames_ =
+ (kWsolaSearchIntervalMs * samples_per_second_) / 1000 + 1;
ajm 2013/07/23 18:03:28 What's the +1 for?
turaj 2013/07/29 22:09:57 To make the search region symmetric around |output
+
+  // Make sure the window size is an even number.
+ ola_window_size_ = static_cast<int>(
+ floor(kOlaWindowSizeMs * samples_per_second_ / 1000 / 2)) * 2;
ajm 2013/07/23 18:03:28 These are all integers, right? Shouldn't need the
turaj 2013/07/29 22:09:57 Right. On 2013/07/23 18:03:28, ajm wrote:
+
+ ola_hop_size_ = ola_window_size_ / 2;
+
+ search_region_center_offset_ = (num_candid_frames_ - 1) / 2 + (
+ ola_window_size_ / 2 - 1);
+
+ ola_window_.reset(new float[ola_window_size_]);
+ HannSym(ola_window_size_, ola_window_.get());
+
+ transition_window_.reset(new float[ola_window_size_ * 2]);
+ HannSym(2 * ola_window_size_, transition_window_.get());
+
+ wsola_output_ = AudioBus::Create(channels_, ola_window_size_ + ola_hop_size_);
}
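
For reference, a minimal standalone sketch of the sizing arithmetic above, assuming a 48 kHz stream; the program and the commented values are illustrative and not part of the patch:

#include <cstdio>

// Reproduces the WSOLA sizing math from Initialize() at 48000 Hz.
int main() {
  const int samples_per_second = 48000;
  const int kOlaWindowSizeMs = 25;
  const int kWsolaSearchIntervalMs = 30;

  // Per the review reply, the +1 keeps the search region symmetric around
  // the predicted center |output_index_| * |playback_rate_|.
  const int num_candid_frames =
      kWsolaSearchIntervalMs * samples_per_second / 1000 + 1;            // 1441

  // Rounded down to an even number so the hop is exactly half the window.
  const int ola_window_size =
      kOlaWindowSizeMs * samples_per_second / 1000 / 2 * 2;              // 1200
  const int ola_hop_size = ola_window_size / 2;                          // 600

  const int search_region_center_offset =
      (num_candid_frames - 1) / 2 + (ola_window_size / 2 - 1);           // 1319

  printf("%d %d %d %d\n", num_candid_frames, ola_window_size, ola_hop_size,
         search_region_center_offset);
  return 0;
}
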
int AudioRendererAlgorithm::FillBuffer(AudioBus* dest, int requested_frames) {
@@ -93,12 +115,12 @@ int AudioRendererAlgorithm::FillBuffer(AudioBus* dest, int requested_frames) {
return frames_to_render;
}
- int slower_step = ceil(window_size_ * playback_rate_);
- int faster_step = ceil(window_size_ / playback_rate_);
+ int slower_step = ceil(ola_window_size_ * playback_rate_);
+ int faster_step = ceil(ola_window_size_ / playback_rate_);
// Optimize the most common |playback_rate_| ~= 1 case to use a single copy
// instead of copying frame by frame.
- if (window_size_ <= faster_step && slower_step >= window_size_) {
+ if (ola_window_size_ <= faster_step && slower_step >= ola_window_size_) {
const int frames_to_copy =
std::min(audio_buffer_.frames(), requested_frames);
const int frames_read = audio_buffer_.ReadFrames(frames_to_copy, 0, dest);
@@ -106,277 +128,232 @@ int AudioRendererAlgorithm::FillBuffer(AudioBus* dest, int requested_frames) {
return frames_read;
}
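
The pass-through branch above fires only when |playback_rate_| is so close to 1 that stretching a whole window would not change its length by even one frame. A sketch of that check with illustrative numbers (NearUnityRate is a hypothetical helper, not in the patch):

#include <cmath>

// With ola_window_size = 1200 (25 ms at 48 kHz):
//   rate = 1.0    -> slower_step = 1200, faster_step = 1200 -> direct copy
//   rate = 1.0005 -> slower_step = 1201, faster_step = 1200 -> still direct copy
//   rate = 1.01   -> faster_step = ceil(1200 / 1.01) = 1189 < 1200 -> WSOLA path
bool NearUnityRate(int ola_window_size, float rate) {
  const int slower_step = std::ceil(ola_window_size * rate);
  const int faster_step = std::ceil(ola_window_size / rate);
  return ola_window_size <= faster_step && slower_step >= ola_window_size;
}
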
- int total_frames_rendered = 0;
- while (total_frames_rendered < requested_frames) {
- if (index_into_window_ >= window_size_)
- ResetWindow();
-
- int rendered_frames = 0;
- if (window_size_ > faster_step) {
- rendered_frames =
- OutputFasterPlayback(dest,
- total_frames_rendered,
- requested_frames - total_frames_rendered,
- window_size_,
- faster_step);
- } else if (slower_step < window_size_) {
- rendered_frames =
- OutputSlowerPlayback(dest,
- total_frames_rendered,
- requested_frames - total_frames_rendered,
- slower_step,
- window_size_);
- } else {
- NOTREACHED();
- }
+ int total_frames_rendered = WsolaOutput(requested_frames, dest);
+ return total_frames_rendered;
+}
- if (rendered_frames == 0)
- break;
+void AudioRendererAlgorithm::SetPlaybackRate(float new_rate) {
+ DCHECK_GE(new_rate, 0);
+ playback_rate_ = new_rate;
+ // Round it to two decimal digits.
+ playback_rate_ = floor(playback_rate_ * 100.f + 0.5f) / 100;
ajm 2013/07/23 18:03:28 Just use new_rate here directly. Why do you have t
turaj 2013/07/29 22:09:57 Truncation is needed when it comes to removing fra
+ muted_ =
+ playback_rate_ < kMinPlaybackRate || playback_rate_ > kMaxPlaybackRate;
+}
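
The two-decimal rounding above is what lets RemoveOldInputFrames() further down treat 100 output frames as mapping to an exact integer number of input frames. A small illustration (RoundRate is a hypothetical stand-in for the statement above):

#include <cmath>

// Rounds a playback rate to two decimal digits, mirroring SetPlaybackRate().
float RoundRate(float rate) {
  return std::floor(rate * 100.f + 0.5f) / 100.f;
}
// RoundRate(1.2349f) -> 1.23 and RoundRate(0.666f) -> 0.67; at a rate of
// 0.67, every 100 output frames consume exactly 67 input frames.
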
- total_frames_rendered += rendered_frames;
- }
- return total_frames_rendered;
+void AudioRendererAlgorithm::FlushBuffers() {
+ // Clear the queue of decoded packets (releasing the buffers).
+ audio_buffer_.Clear();
+ output_index_ = 0;
+ target_window_index_ = 0;
+ wsola_output_->Zero();
+ num_complete_frames_ = 0;
}
-void AudioRendererAlgorithm::ResetWindow() {
- DCHECK_LE(index_into_window_, window_size_);
- index_into_window_ = 0;
- crossfade_frame_number_ = 0;
+base::TimeDelta AudioRendererAlgorithm::GetTime() {
+ return audio_buffer_.current_time();
}
-int AudioRendererAlgorithm::OutputFasterPlayback(AudioBus* dest,
- int dest_offset,
- int requested_frames,
- int input_step,
- int output_step) {
- // Ensure we don't run into OOB read/write situation.
- CHECK_GT(input_step, output_step);
- DCHECK_LT(index_into_window_, window_size_);
- DCHECK_GT(playback_rate_, 1.0);
- DCHECK(!muted_);
-
- if (audio_buffer_.frames() < 1)
- return 0;
+void AudioRendererAlgorithm::EnqueueBuffer(
+ const scoped_refptr<AudioBuffer>& buffer_in) {
+ DCHECK(!buffer_in->end_of_stream());
+ audio_buffer_.Append(buffer_in);
+}
- // The audio data is output in a series of windows. For sped-up playback,
- // the window is comprised of the following phases:
- //
- // a) Output raw data.
- // b) Save bytes for crossfade in |crossfade_buffer_|.
- // c) Drop data.
- // d) Output crossfaded audio leading up to the next window.
- //
- // The duration of each phase is computed below based on the |window_size_|
- // and |playback_rate_|.
- DCHECK_LE(frames_in_crossfade_, output_step);
-
- // This is the index of the end of phase a, beginning of phase b.
- int outtro_crossfade_begin = output_step - frames_in_crossfade_;
-
- // This is the index of the end of phase b, beginning of phase c.
- int outtro_crossfade_end = output_step;
-
- // This is the index of the end of phase c, beginning of phase d.
- // This phase continues until |index_into_window_| reaches |window_size_|, at
- // which point the window restarts.
- int intro_crossfade_begin = input_step - frames_in_crossfade_;
-
- // a) Output raw frames if we haven't reached the crossfade section.
- if (index_into_window_ < outtro_crossfade_begin) {
- // Read as many frames as we can and return the count. If it's not enough,
- // we will get called again.
- const int frames_to_copy =
- std::min(requested_frames, outtro_crossfade_begin - index_into_window_);
- int copied = audio_buffer_.ReadFrames(frames_to_copy, dest_offset, dest);
- index_into_window_ += copied;
- return copied;
- }
+bool AudioRendererAlgorithm::IsQueueFull() {
+ return audio_buffer_.frames() >= capacity_;
+}
+
+void AudioRendererAlgorithm::IncreaseQueueCapacity() {
+ capacity_ = std::min(2 * capacity_, kMaxBufferSizeInFrames);
+}
- // b) Save outtro crossfade frames into intermediate buffer, but do not output
- // anything to |dest|.
- if (index_into_window_ < outtro_crossfade_end) {
- // This phase only applies if there are bytes to crossfade.
- DCHECK_GT(frames_in_crossfade_, 0);
- int crossfade_start = index_into_window_ - outtro_crossfade_begin;
- int crossfade_count = outtro_crossfade_end - index_into_window_;
- int copied = audio_buffer_.ReadFrames(
- crossfade_count, crossfade_start, crossfade_buffer_.get());
- index_into_window_ += copied;
-
- // Did we get all the frames we need? If not, return and let subsequent
- // calls try to get the rest.
- if (copied != crossfade_count)
- return 0;
+bool AudioRendererAlgorithm::CanPerformWsola() const {
+ const int search_region_size = num_candid_frames_ + (ola_window_size_ - 1);
+ const int frames = audio_buffer_.frames();
+ if (target_window_index_ + ola_window_size_ <= frames &&
+ GetSearchRegionIndex() + search_region_size <= frames) {
+ return true;
}
+ return false;
+}
- // c) Drop frames until we reach the intro crossfade section.
- if (index_into_window_ < intro_crossfade_begin) {
- // Check if there is enough data to skip all the frames needed. If not,
- // return 0 and let subsequent calls try to skip it all.
- int seek_frames = intro_crossfade_begin - index_into_window_;
- if (audio_buffer_.frames() < seek_frames)
- return 0;
- audio_buffer_.SeekFrames(seek_frames);
+int AudioRendererAlgorithm::WsolaOutput(int requested_frames, AudioBus* dest) {
+ DCHECK(channels_ == dest->channels());
- // We've dropped all the frames that need to be dropped.
- index_into_window_ += seek_frames;
+ int rendered_frames = ReadWsolaOutput(requested_frames, 0, dest);
+ while (rendered_frames < requested_frames && CanPerformWsola()) {
+ Wsola();
+ rendered_frames += ReadWsolaOutput(requested_frames - rendered_frames,
ajm 2013/07/23 18:03:28 Do you need to break these functions up?
turaj 2013/07/29 22:09:57 I can define "int Wsola(requested_frames, int outp
+ rendered_frames, dest);
}
-
- // d) Crossfade and output a frame, as long as we have data.
- if (audio_buffer_.frames() < 1)
- return 0;
- DCHECK_GT(frames_in_crossfade_, 0);
- DCHECK_LT(index_into_window_, window_size_);
-
- int offset_into_buffer = index_into_window_ - intro_crossfade_begin;
- int copied = audio_buffer_.ReadFrames(1, dest_offset, dest);
- DCHECK_EQ(copied, 1);
- CrossfadeFrame(crossfade_buffer_.get(),
- offset_into_buffer,
- dest,
- dest_offset,
- offset_into_buffer);
- index_into_window_ += copied;
- return copied;
+ return rendered_frames;
}
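
WsolaOutput() first drains whatever completed frames are already in |wsola_output_|, then produces one more hop per Wsola() call until the request is met or input runs short. A rough trace, assuming a 600-frame hop and 400 frames initially complete (illustrative numbers only):

// requested_frames = 1500, num_complete_frames_ = 400:
//   ReadWsolaOutput(1500,    0, dest) -> renders 400, 1100 still needed
//   Wsola()                           -> num_complete_frames_ = 600
//   ReadWsolaOutput(1100,  400, dest) -> renders 600,  500 still needed
//   Wsola()                           -> num_complete_frames_ = 600
//   ReadWsolaOutput( 500, 1000, dest) -> renders 500, done (100 frames remain queued)
// The loop exits early if CanPerformWsola() reports that |audio_buffer_| does
// not yet cover both the target window and the search region.
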
-int AudioRendererAlgorithm::OutputSlowerPlayback(AudioBus* dest,
- int dest_offset,
- int requested_frames,
- int input_step,
- int output_step) {
- // Ensure we don't run into OOB read/write situation.
- CHECK_LT(input_step, output_step);
- DCHECK_LT(index_into_window_, window_size_);
- DCHECK_LT(playback_rate_, 1.0);
- DCHECK_NE(playback_rate_, 0);
- DCHECK(!muted_);
-
- if (audio_buffer_.frames() < 1)
- return 0;
+void AudioRendererAlgorithm::Wsola() {
+  // Holds the optimal frame.
+ scoped_ptr<AudioBus> optimal_frame = AudioBus::Create(
ajm 2013/07/23 18:03:28 I'm not sure how AudioBus works, but do you want t
turaj 2013/07/29 22:09:57 It is not very expensive basically one malloc with
DaleCurtis 2013/07/29 23:48:32 malloc is very expensive relative to the rest of t
+ channels_, ola_window_size_);
+ GetOptimalBlock(optimal_frame.get());
+
+ // Overlap-and-add.
+  for (int k = 0; k < channels_; ++k) {
+ float* ch_opt_frame = optimal_frame->channel(k);
+ float* ch_output = wsola_output_->channel(k) + num_complete_frames_;
+ for (int n = 0; n < ola_hop_size_; ++n) {
+ ch_output[n] = ch_output[n] * ola_window_[ola_hop_size_ + n] +
+ ch_opt_frame[n] * ola_window_[n];
+ }
- // The audio data is output in a series of windows. For slowed down playback,
- // the window is comprised of the following phases:
- //
- // a) Output raw data.
- // b) Output and save bytes for crossfade in |crossfade_buffer_|.
- // c) Output* raw data.
- // d) Output* crossfaded audio leading up to the next window.
- //
- // * Phases c) and d) do not progress |audio_buffer_|'s cursor so that the
- // |audio_buffer_|'s cursor is in the correct place for the next window.
- //
- // The duration of each phase is computed below based on the |window_size_|
- // and |playback_rate_|.
- DCHECK_LE(frames_in_crossfade_, input_step);
-
- // This is the index of the end of phase a, beginning of phase b.
- int intro_crossfade_begin = input_step - frames_in_crossfade_;
-
- // This is the index of the end of phase b, beginning of phase c.
- int intro_crossfade_end = input_step;
-
- // This is the index of the end of phase c, beginning of phase d.
- // This phase continues until |index_into_window_| reaches |window_size_|, at
- // which point the window restarts.
- int outtro_crossfade_begin = output_step - frames_in_crossfade_;
-
- // a) Output raw frames.
- if (index_into_window_ < intro_crossfade_begin) {
- // Read as many frames as we can and return the count. If it's not enough,
- // we will get called again.
- const int frames_to_copy =
- std::min(requested_frames, intro_crossfade_begin - index_into_window_);
- int copied = audio_buffer_.ReadFrames(frames_to_copy, dest_offset, dest);
- index_into_window_ += copied;
- return copied;
+ // Copy the second half to the output.
+ memcpy(&ch_output[ola_hop_size_], &ch_opt_frame[ola_hop_size_],
+ sizeof(*ch_opt_frame) * ola_hop_size_);
}
- // b) Save the raw frames for the intro crossfade section, then copy the
- // same frames to |dest|.
- if (index_into_window_ < intro_crossfade_end) {
- const int frames_to_copy =
- std::min(requested_frames, intro_crossfade_end - index_into_window_);
- int offset = index_into_window_ - intro_crossfade_begin;
- int copied = audio_buffer_.ReadFrames(
- frames_to_copy, offset, crossfade_buffer_.get());
- crossfade_buffer_->CopyPartialFramesTo(offset, copied, dest_offset, dest);
- index_into_window_ += copied;
- return copied;
- }
+ num_complete_frames_ += ola_hop_size_;
+ output_index_ += ola_hop_size_;
- // c) Output a raw frame into |dest| without advancing the |audio_buffer_|
- // cursor.
- int audio_buffer_offset = index_into_window_ - intro_crossfade_end;
- DCHECK_GE(audio_buffer_offset, 0);
- if (audio_buffer_.frames() <= audio_buffer_offset)
- return 0;
- int copied =
- audio_buffer_.PeekFrames(1, audio_buffer_offset, dest_offset, dest);
- DCHECK_EQ(1, copied);
-
- // d) Crossfade the next frame of |crossfade_buffer_| into |dest| if we've
- // reached the outtro crossfade section of the window.
- if (index_into_window_ >= outtro_crossfade_begin) {
- int offset_into_crossfade_buffer =
- index_into_window_ - outtro_crossfade_begin;
- CrossfadeFrame(dest,
- dest_offset,
- crossfade_buffer_.get(),
- offset_into_crossfade_buffer,
- offset_into_crossfade_buffer);
- }
+ RemoveOldInputFrames();
+}
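
Wsola() above is the core overlap-and-add step: the previous hop's tail is faded out with the second half of |ola_window_| while the new block is faded in with the first half, and the block's untouched second half becomes the tail for the next call. A minimal single-channel sketch of the same step (OverlapAndAdd is a hypothetical helper mirroring the loop above, assuming a Hann-like window split into two halves):

#include <cstring>

// One overlap-and-add step for a single channel. |output| holds at least
// window_size / 2 tail samples from the previous step, |block| is the newly
// selected window_size-sample block, |window| is the analysis window.
void OverlapAndAdd(float* output, const float* block, const float* window,
                   int window_size) {
  const int hop = window_size / 2;
  for (int n = 0; n < hop; ++n) {
    // Fade the old tail out and the new block in.
    output[n] = output[n] * window[hop + n] + block[n] * window[n];
  }
  // Keep the block's second half as-is; the next call will fade it out.
  std::memcpy(output + hop, block + hop, sizeof(float) * hop);
}
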
+
+int AudioRendererAlgorithm::GetSearchRegionIndex() const {
+ // Center of the search region, in frames.
+ const int search_region_center_index = static_cast<int>(floor(
+ output_index_ * playback_rate_ + 0.5));
- index_into_window_ += copied;
- return copied;
+ // Index of the beginning of the search region, in frames.
+ return search_region_center_index - search_region_center_offset_;
}
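
Early in playback the returned index can be negative, because the predicted center has not yet moved past |search_region_center_offset_|; PeekAudioWithZerroAppend() below treats those missing leading frames as silence. A worked example, assuming search_region_center_offset_ = 1319 and playback_rate_ = 1.0 (illustrative values):

//   output_index_ =    0 -> center =    0 -> search region starts at -1319
//   output_index_ =  600 -> center =  600 -> search region starts at  -719
//   output_index_ = 1800 -> center = 1800 -> search region starts at   481
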
-void AudioRendererAlgorithm::CrossfadeFrame(AudioBus* intro,
- int intro_offset,
- AudioBus* outtro,
- int outtro_offset,
- int fade_offset) {
- float crossfade_ratio =
- static_cast<float>(fade_offset) / frames_in_crossfade_;
- for (int channel = 0; channel < channels_; ++channel) {
- outtro->channel(channel)[outtro_offset] =
- (1.0f - crossfade_ratio) * intro->channel(channel)[intro_offset] +
- (crossfade_ratio) * outtro->channel(channel)[outtro_offset];
- }
+void AudioRendererAlgorithm::RemoveOldInputFrames() {
+ const int earliest_used_index = std::min(target_window_index_,
+ GetSearchRegionIndex());
+
+ if (earliest_used_index < 0)
+ return; // Nothing to remove
+
+  // Assuming |playback_rate_| * 100 == floor(|playback_rate_| * 100),
+  // i.e. |playback_rate_| has at most two decimal digits, we remove input
+  // frames in blocks of 100 * |playback_rate_| frames.
+ const int kOutputFramesPerBlock = 100;
+ const int input_frames_per_block =
+ static_cast<int>(floor(playback_rate_ * kOutputFramesPerBlock + 0.5f));
+ const int blocks_to_remove = earliest_used_index / input_frames_per_block;
+ const int input_frames_to_remove = input_frames_per_block * blocks_to_remove;
+
+ // Remove frames from input and adjust indices accordingly.
+ audio_buffer_.SeekFrames(input_frames_to_remove);
+ target_window_index_ -= input_frames_to_remove;
+
+ // Adjust output index.
+ output_index_ -= kOutputFramesPerBlock * blocks_to_remove;
+ DCHECK(output_index_ >= 0);
}
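
Input is released only in whole blocks so that |output_index_| * |playback_rate_| keeps pointing at the same input position after trimming. A worked example with hypothetical numbers:

// playback_rate_ = 1.25 -> input_frames_per_block = 125 per 100 output frames.
// If the earliest index still in use is 300:
//   blocks_to_remove       = 300 / 125 = 2
//   input_frames_to_remove = 2 * 125   = 250   (audio_buffer_.SeekFrames(250))
//   output_index_         -= 2 * 100   = 200
// Both sides move back by exact whole blocks: 200 output frames * 1.25 equals
// the 250 input frames that were dropped.
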
-void AudioRendererAlgorithm::SetPlaybackRate(float new_rate) {
- DCHECK_GE(new_rate, 0);
- playback_rate_ = new_rate;
- muted_ =
- playback_rate_ < kMinPlaybackRate || playback_rate_ > kMaxPlaybackRate;
+int AudioRendererAlgorithm::ReadWsolaOutput(
+ int requested_frames, int output_offset, AudioBus* dest) {
+ int rendered_frames = std::min(num_complete_frames_, requested_frames);
- ResetWindow();
-}
+ if (rendered_frames == 0)
+ return 0; // There is nothing to read from |wsola_output_|, return.
-void AudioRendererAlgorithm::FlushBuffers() {
- ResetWindow();
+ wsola_output_->CopyPartialFramesTo(0, rendered_frames, output_offset, dest);
- // Clear the queue of decoded packets (releasing the buffers).
- audio_buffer_.Clear();
+ // Remove the frames which are read.
+ int frames_to_move = wsola_output_->frames() - rendered_frames;
+ for (int k = 0; k < channels_; ++k) {
+ float* ch = wsola_output_->channel(k);
+ memmove(ch, &ch[rendered_frames], sizeof(*ch) * frames_to_move);
+ }
+ num_complete_frames_ -= rendered_frames;
+ return rendered_frames;
}
-base::TimeDelta AudioRendererAlgorithm::GetTime() {
- return audio_buffer_.current_time();
-}
+bool AudioRendererAlgorithm::TargetIsWithinSearchRegion() const {
+ const int search_region_index = GetSearchRegionIndex();
+ const int search_region_size = num_candid_frames_ + (ola_window_size_ - 1);
-void AudioRendererAlgorithm::EnqueueBuffer(
- const scoped_refptr<AudioBuffer>& buffer_in) {
- DCHECK(!buffer_in->end_of_stream());
- audio_buffer_.Append(buffer_in);
+ if (target_window_index_ >= search_region_index &&
+ target_window_index_ + ola_window_size_ <=
+ search_region_index + search_region_size) {
+ return true;
+ }
+ return false;
}
-bool AudioRendererAlgorithm::IsQueueFull() {
- return audio_buffer_.frames() >= capacity_;
+void AudioRendererAlgorithm::GetOptimalBlock(AudioBus* optimal_block) {
+ int optimal_index = 0;
+ if (TargetIsWithinSearchRegion()) {
+ optimal_index = target_window_index_;
+ // Get the optimal window.
+ PeekAudioWithZerroAppend(optimal_index, optimal_block);
+ } else {
+ // Holds the target window.
+ scoped_ptr<AudioBus> target_window = AudioBus::Create(
+ channels_, ola_window_size_);
+ PeekAudioWithZerroAppend(target_window_index_, target_window.get());
+
+ const int search_region_index = GetSearchRegionIndex();
+
+  // Holds a segment of the signal that the similarity measure operates on.
+ scoped_ptr<AudioBus> search_segment = AudioBus::Create(
+ channels_, num_candid_frames_ + (ola_window_size_ - 1));
+ PeekAudioWithZerroAppend(search_region_index, search_segment.get());
+
+ int last_optimal = target_window_index_ - ola_hop_size_ -
+ search_region_index;
+  interval exclude_interval = std::make_pair(last_optimal - 80,
+ last_optimal + 80);
+
+ // |optimal_index| is in frames and it is relative to the beginning
+ // of the |search_segment|.
+ optimal_index = OptimalIndex(search_segment.get(), target_window.get(),
+  exclude_interval);
+
+ // Translate |index| w.r.t. the beginning of |audio_buffer_|.
+ optimal_index += search_region_index;
+
+ // Get the optimal window.
+ PeekAudioWithZerroAppend(optimal_index, optimal_block);
ajm 2013/07/23 18:03:28 Zerro -> Zero
turaj 2013/07/29 22:09:57 Done.
+
+  // Make a transition from the target window to the optimal window, if they
+  // differ. The target window has the best continuation of the current
+  // output. The optimal block is the block most similar to the target;
+  // however, it might introduce some discontinuity when overlap-added.
+  // Therefore, we combine them for a smoother transition.
+ for (int k = 0; k < channels_; ++k) {
+ float* ch_opt = optimal_block->channel(k);
+ float* ch_target = target_window->channel(k);
+ for (int n = 0; n < ola_window_size_; ++n) {
+ ch_opt[n] = ch_opt[n] * transition_window_[n] + ch_target[n] *
+ transition_window_[ola_window_size_ + n];
+ }
+ }
+ }
+
+ // Next target is one hop ahead of the current optimal.
+ target_window_index_ = optimal_index + ola_hop_size_;
}
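
When the optimal block differs from the target, the inner loop above mixes them with the two halves of the double-length Hann window built in Initialize(): the target (the best continuation of the current output) is faded out while the optimal block (the best spectral match) is faded in. A minimal single-channel sketch of that blend (BlendTargetIntoOptimal is a hypothetical helper with the same arithmetic as the loop above):

// Blend |target| into |optimal| in place. |transition| is a 2 * n_frames
// window whose first half ramps up and whose second half ramps down.
void BlendTargetIntoOptimal(float* optimal, const float* target,
                            const float* transition, int n_frames) {
  for (int n = 0; n < n_frames; ++n) {
    optimal[n] =
        optimal[n] * transition[n] + target[n] * transition[n_frames + n];
  }
}
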
-void AudioRendererAlgorithm::IncreaseQueueCapacity() {
- capacity_ = std::min(2 * capacity_, kMaxBufferSizeInFrames);
+bool AudioRendererAlgorithm::PeekAudioWithZerroAppend(
ajm 2013/07/23 18:03:28 You don't check the return value of this anywhere.
turaj 2013/07/29 22:09:57 I decided to check it and propagate result. On 2
+ int read_offset_frames, AudioBus* dest) {
+ int num_frames = dest->frames();
+ if (read_offset_frames + num_frames > audio_buffer_.frames())
+ return false;
+
+ int write_offset = 0;
+ int num_frames_to_read = dest->frames();
+ if (read_offset_frames < 0) {
+ int num_zero_frames_appended = std::min(-read_offset_frames,
+ num_frames_to_read);
+ read_offset_frames = 0;
+ num_frames_to_read -= num_zero_frames_appended;
+ write_offset = num_zero_frames_appended;
+ dest->ZeroFrames(num_zero_frames_appended);
+ }
+ audio_buffer_.PeekFrames(num_frames_to_read, read_offset_frames,
+ write_offset, dest);
+ return true;
}
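
A negative |read_offset_frames| corresponds to a search region that starts before the first buffered frame (see GetSearchRegionIndex()); the missing frames are filled with silence before peeking the rest. A worked example with hypothetical numbers, assuming |audio_buffer_| holds at least 1000 frames:

// dest->frames() = 1200, read_offset_frames = -200:
//   num_zero_frames_appended = min(200, 1200) = 200 -> dest->ZeroFrames(200)
//   read_offset_frames = 0, num_frames_to_read = 1000, write_offset = 200
//   PeekFrames(1000, 0, 200, dest) then fills in the buffered audio after
//   the leading zeros.
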
} // namespace media
