Index: media/filters/audio_clock.cc
diff --git a/media/filters/audio_clock.cc b/media/filters/audio_clock.cc
new file mode 100644
index 0000000000000000000000000000000000000000..2b5e843dd24a1cec62e773b189e67eb6c674672a
--- /dev/null
+++ b/media/filters/audio_clock.cc
@@ -0,0 +1,139 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/filters/audio_clock.h"
+
+#include "base/logging.h"
+#include "media/base/buffers.h"
+
+namespace media {
+
+AudioClock::AudioClock(int sample_rate)
+    : sample_rate_(sample_rate), last_endpoint_timestamp_(kNoTimestamp()) {
DaleCurtis
2014/04/30 20:36:33
Instead of storing sample_rate_ you could store a
scherkus (not reviewing)
2014/05/02 19:26:05
opted to combine the expression in CurrentMediaTimestamp()
+}
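The suggestion above is cut off in this view; one plausible reading is to precompute the frame-to-time conversion once instead of keeping the raw sample rate. A minimal sketch of that alternative, with microseconds_per_frame_ as an invented member name:

  AudioClock::AudioClock(int sample_rate)
      : microseconds_per_frame_(
            static_cast<double>(base::Time::kMicrosecondsPerSecond) /
            sample_rate),
        last_endpoint_timestamp_(kNoTimestamp()) {
  }

Per the reply, the author instead folded the conversion directly into CurrentMediaTimestamp(); a sketch of that version follows that function below.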
+
+AudioClock::~AudioClock() {
+}
+
+void AudioClock::WroteAudio(int frames,
+                            int delay_frames,
+                            float playback_rate,
+                            base::TimeDelta timestamp) {
+  CHECK_GT(playback_rate, 0);
DaleCurtis
2014/04/30 20:36:33
DCHECK_GE(frames, 0) ? Add to WroteSilence() as well?
scherkus (not reviewing)
2014/05/02 19:26:05
Done.
+  CHECK(timestamp != kNoTimestamp());
+
+  if (last_endpoint_timestamp_ == kNoTimestamp())
+    PushBufferedAudio(delay_frames, 0, kNoTimestamp());
+
+  TrimBufferedAudioToMatchDelay(delay_frames);
+  PushBufferedAudio(frames, playback_rate, timestamp);
+
+  last_endpoint_timestamp_ = timestamp;
+}
+
+void AudioClock::WroteSilence(int frames, int delay_frames) {
+  if (last_endpoint_timestamp_ == kNoTimestamp())
+    PushBufferedAudio(delay_frames, 0, kNoTimestamp());
+
+  TrimBufferedAudioToMatchDelay(delay_frames);
+  PushBufferedAudio(frames, 0, kNoTimestamp());
+}
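The revised patchset with the added checks isn't included in this diff; presumably both entry points gain a non-negativity check along these lines (placement is a guess):

  void AudioClock::WroteSilence(int frames, int delay_frames) {
    DCHECK_GE(frames, 0);

    if (last_endpoint_timestamp_ == kNoTimestamp())
      PushBufferedAudio(delay_frames, 0, kNoTimestamp());

    TrimBufferedAudioToMatchDelay(delay_frames);
    PushBufferedAudio(frames, 0, kNoTimestamp());
  }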
+
+base::TimeDelta AudioClock::CurrentMediaTimestamp() const {
+  base::TimeDelta silence;
+  for (size_t i = 0; i < buffered_audio_.size(); ++i) {
+    // Account for silence ahead of the buffer closest to being played.
+    if (buffered_audio_[i].playback_rate == 0) {
+      silence += base::TimeDelta::FromMicroseconds(
DaleCurtis
2014/04/30 20:36:33
Instead of repeatedly converting this, it'd be bet
scherkus (not reviewing)
2014/05/02 19:26:05
Done.
+          base::Time::kMicrosecondsPerSecond * buffered_audio_[i].frames /
+          sample_rate_);
+      continue;
+    }
+
+    // Multiply by playback rate as frames represent time-scaled audio.
+    base::TimeDelta current_media_time = buffered_audio_[i].endpoint_timestamp;
DaleCurtis
2014/04/30 20:36:33
You could write this as a single return statement.
scherkus (not reviewing)
2014/05/02 19:26:05
Done.
+    current_media_time -= base::TimeDelta::FromMicroseconds(
+        base::Time::kMicrosecondsPerSecond * buffered_audio_[i].frames *
+        buffered_audio_[i].playback_rate / sample_rate_);
+    current_media_time -= silence;
+    return current_media_time;
+  }
+
+  // Either:
+  //   1) AudioClock is uninitialized and we'll return kNoTimestamp()
+  //   2) All previously buffered audio has been replaced by silence,
+  //      meaning media time is now at the last endpoint
+  return last_endpoint_timestamp_;
+}
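Both "Done." replies in this function refer to a later patchset that isn't shown here. A rough sketch of how the loop might look with the silence conversion hoisted out and the computation collapsed into a single return (variable name and rounding behavior are guesses):

  base::TimeDelta AudioClock::CurrentMediaTimestamp() const {
    int silence_frames = 0;
    for (size_t i = 0; i < buffered_audio_.size(); ++i) {
      // Accumulate silence in frames; convert to time only once below.
      if (buffered_audio_[i].playback_rate == 0) {
        silence_frames += buffered_audio_[i].frames;
        continue;
      }

      // Multiply by playback rate as frames represent time-scaled audio.
      return buffered_audio_[i].endpoint_timestamp -
             base::TimeDelta::FromMicroseconds(
                 base::Time::kMicrosecondsPerSecond *
                 (buffered_audio_[i].frames * buffered_audio_[i].playback_rate +
                  silence_frames) /
                 sample_rate_);
    }

    return last_endpoint_timestamp_;
  }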
+
+base::TimeDelta AudioClock::LastEndpointTimestamp() const {
DaleCurtis
2014/04/30 20:36:33
hacker_style() if you want.
scherkus (not reviewing)
2014/05/02 19:26:05
Done.
+  return last_endpoint_timestamp_;
+}
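The declaration lives in audio_clock.h, which isn't part of this file's diff; after the suggested rename the trivial getter would presumably become an inline hacker_style accessor in the header, roughly:

  // In media/filters/audio_clock.h (sketch only):
  base::TimeDelta last_endpoint_timestamp() const {
    return last_endpoint_timestamp_;
  }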
+
+void AudioClock::TrimBufferedAudioToMatchDelay(int delay_frames) {
+  if (buffered_audio_.empty())
+    return;
+
+  size_t i = buffered_audio_.size() - 1;
+  while (true) {
+    if (buffered_audio_[i].frames <= delay_frames) {
+      // Reached the end before accounting for all of |delay_frames|. This
DaleCurtis
2014/04/30 20:36:33
Reflow comment block. Line breaks are off.
scherkus (not reviewing)
2014/05/02 19:26:05
Done.
+      // means we haven't written enough audio data yet to account for hardware
+      // delay. In this case, do nothing.
+      if (i == 0)
+        return;
+
+      // Keep accounting for |delay_frames|.
+      delay_frames -= buffered_audio_[i].frames;
+      --i;
+      continue;
+    }
+
+    // All of |delay_frames| has been accounted for: adjust amount of frames
+    // left in current buffer. All preceding elements with index < |i| should
+    // be considered played out and hence discarded.
DaleCurtis
2014/04/30 20:36:33
discarded
scherkus (not reviewing)
2014/05/02 19:26:05
Done.
+    buffered_audio_[i].frames = delay_frames;
DaleCurtis
2014/04/30 20:36:33
Is this right? delay_frames may be 0 now. Shouldn
scherkus (not reviewing)
2014/05/02 19:26:05
Say we have: [20, 20, 20] with a delay of 45, we g
DaleCurtis
2014/05/02 19:37:06
Oh I see, I had this turned around and was conside
+    break;
+  }
+
+  // At this point |i| points at what will be the new head of
DaleCurtis
2014/04/30 20:36:33
Reflow comment block.
scherkus (not reviewing)
2014/05/02 19:26:05
Done.
+  // |buffered_audio_|; however, if it contains no audio it should be removed
+  // as well.
+  if (buffered_audio_[i].frames == 0)
+    ++i;
+
+  buffered_audio_.erase(buffered_audio_.begin(), buffered_audio_.begin() + i);
+}
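Completing the cut-off example from the exchange above (the starting values come from the reply; the walkthrough is added here): with buffered frame counts [20, 20, 20] and a delay of 45, the loop walks backwards from the newest entry: 45 - 20 = 25, then 25 - 20 = 5, and the oldest entry holds more than the remaining 5 frames, so it is trimmed to 5, leaving [5, 20, 20], exactly 45 frames of buffered audio to match the hardware delay. For the delay_frames == 0 case raised by the first comment: [20, 20, 20] with a delay of 40 leaves the oldest entry with 0 frames, and the |frames == 0| check after the loop bumps |i| so that the empty entry is erased as well, leaving [20, 20].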
+
+void AudioClock::PushBufferedAudio(int frames,
+                                   float playback_rate,
+                                   base::TimeDelta endpoint_timestamp) {
+  DCHECK_EQ(playback_rate == 0, endpoint_timestamp == kNoTimestamp());
DaleCurtis
2014/04/30 20:36:33
These DCHECKS are kind of a pain when they fire. I
scherkus (not reviewing)
2014/05/02 19:26:05
Done.
+
+  if (frames == 0)
+    return;
+
+  // Avoid creating extra elements where possible.
+  if (!buffered_audio_.empty() &&
+      buffered_audio_.back().playback_rate == playback_rate) {
+    buffered_audio_.back().frames += frames;
+    buffered_audio_.back().endpoint_timestamp = endpoint_timestamp;
+    return;
+  }
+
+  buffered_audio_.push_back(
+      BufferedAudio(frames, playback_rate, endpoint_timestamp));
+}
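The alternative suggested above is cut off; one common pattern that preserves the same invariant but fails with a readable message is to split it into two directional DCHECKs (the messages here are invented):

  DCHECK(playback_rate != 0 || endpoint_timestamp == kNoTimestamp())
      << "silence entries must not carry a timestamp";
  DCHECK(playback_rate == 0 || endpoint_timestamp != kNoTimestamp())
      << "audio entries must carry a timestamp";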
+
+AudioClock::BufferedAudio::BufferedAudio(int frames,
+                                         float playback_rate,
+                                         base::TimeDelta endpoint_timestamp)
+    : frames(frames),
+      playback_rate(playback_rate),
+      endpoint_timestamp(endpoint_timestamp) {
+}
+
+}  // namespace media
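For readers new to the class, an illustrative driver (not part of the patch) shows how the pieces interact; a 1000 Hz sample rate is chosen purely so frames map 1:1 to milliseconds:

  AudioClock clock(1000);

  // 500 frames written at 1x, ending at media time 500 ms, nothing queued yet.
  clock.WroteAudio(500, 0, 1.0, base::TimeDelta::FromMilliseconds(500));
  // All 500 frames are still buffered, so CurrentMediaTimestamp() is 0 ms.

  // Another 500 frames ending at 1000 ms; the hardware still reports 200
  // frames queued from the previous write.
  clock.WroteAudio(500, 200, 1.0, base::TimeDelta::FromMilliseconds(1000));
  // 700 of the 1000 written frames remain buffered, so CurrentMediaTimestamp()
  // is 300 ms while LastEndpointTimestamp() is 1000 ms.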