Index: media/filters/audio_clock.cc |
diff --git a/media/filters/audio_clock.cc b/media/filters/audio_clock.cc |
index e315fa31e2d7084218136c5266461fa23ec92f90..a021370796ddf7427d49bbab0a0a5c78d8293e74 100644 |
--- a/media/filters/audio_clock.cc |
+++ b/media/filters/audio_clock.cc |
@@ -9,139 +9,153 @@ |
namespace media { |
-AudioClock::AudioClock(int sample_rate) |
- : sample_rate_(sample_rate), last_endpoint_timestamp_(kNoTimestamp()) { |
+AudioClock::AudioClock(base::TimeDelta start_timestamp, int sample_rate) |
+ : start_timestamp_(start_timestamp), sample_rate_(sample_rate) { |
} |
AudioClock::~AudioClock() { |
} |
-void AudioClock::WroteAudio(int frames, |
+void AudioClock::WroteAudio(int frames_written, |
+ int frames_requested, |
int delay_frames, |
- float playback_rate, |
- base::TimeDelta timestamp) { |
- CHECK_GT(playback_rate, 0); |
- CHECK(timestamp != kNoTimestamp()); |
- DCHECK_GE(frames, 0); |
+ float playback_rate) { |
+ DCHECK_GE(frames_written, 0); |
+ DCHECK_LE(frames_written, frames_requested); |
DCHECK_GE(delay_frames, 0); |
+ DCHECK_GE(playback_rate, 0); |
- if (last_endpoint_timestamp_ == kNoTimestamp()) |
- PushBufferedAudio(delay_frames, 0, kNoTimestamp()); |
+ // First write: initialize buffer with silence. |
+ if (buffered_.empty()) |
+ PushAudioData(&buffered_, delay_frames, 0.0f); |
- TrimBufferedAudioToMatchDelay(delay_frames); |
- PushBufferedAudio(frames, playback_rate, timestamp); |
+ // Move frames from |buffered_| to |played_| based on |delay_frames|. |
+ int64_t played_frames = std::max(0L, TotalFrames(buffered_) - delay_frames); |
DaleCurtis
2014/08/02 00:51:32
Seems like you could simply keep track of TotalFrames(buffered_) as a member instead of recomputing it.
scherkus (not reviewing)
2014/08/02 01:55:24
Done.
|
+ while (played_frames > 0) { |
+ int64_t frames_to_move = std::min(buffered_.front().frames, played_frames); |
- last_endpoint_timestamp_ = timestamp; |
-} |
+ // No need to keep track of silent audio. |
+ if (buffered_.front().playback_rate > 0.0f) |
+ PushAudioData(&played_, frames_to_move, buffered_.front().playback_rate); |
-void AudioClock::WroteSilence(int frames, int delay_frames) { |
- DCHECK_GE(frames, 0); |
- DCHECK_GE(delay_frames, 0); |
+ buffered_.front().frames -= frames_to_move; |
+ if (buffered_.front().frames == 0) |
+ buffered_.pop_front(); |
- if (last_endpoint_timestamp_ == kNoTimestamp()) |
- PushBufferedAudio(delay_frames, 0, kNoTimestamp()); |
+ played_frames -= frames_to_move; |
+ } |
- TrimBufferedAudioToMatchDelay(delay_frames); |
- PushBufferedAudio(frames, 0, kNoTimestamp()); |
+ // Push in newly buffered data. |
+ PushAudioData(&buffered_, frames_written, playback_rate); |
+ PushAudioData(&buffered_, frames_requested - frames_written, 0.0f); |
} |
base::TimeDelta AudioClock::CurrentMediaTimestamp( |
base::TimeDelta time_since_writing) const { |
- int frames_to_skip = |
- static_cast<int>(time_since_writing.InSecondsF() * sample_rate_); |
- int silence_frames = 0; |
- for (size_t i = 0; i < buffered_audio_.size(); ++i) { |
- int frames = buffered_audio_[i].frames; |
- if (frames_to_skip > 0) { |
- if (frames <= frames_to_skip) { |
- frames_to_skip -= frames; |
- continue; |
- } |
- frames -= frames_to_skip; |
- frames_to_skip = 0; |
- } |
+ // Count up all |played_| audio since |start_timestamp_|. |
+ base::TimeDelta current_timestamp = start_timestamp_; |
+ for (size_t i = 0; i < played_.size(); ++i) { |
DaleCurtis
2014/08/02 00:51:33
played_ never shrinks, so you should just cache current_timestamp instead of recomputing it on every call.
scherkus (not reviewing)
2014/08/02 01:55:24
have to think about it some more ... but doesn't that accumulate rounding error?
DaleCurtis
2014/08/04 18:55:18
I'd guess only slightly more error than you're already introducing.
|
+ DCHECK_NE(played_[i].playback_rate, 0.0f) |
+ << "Silent audio doesn't need to be tracked in |played_|."; |
+ current_timestamp += base::TimeDelta::FromMicroseconds( |
DaleCurtis
2014/08/02 00:51:32
calculate this as a double or float and only divide once at the end.
scherkus (not reviewing)
2014/08/02 01:55:25
Done.
|
+ (played_[i].frames * played_[i].playback_rate) / sample_rate_ * |
+ base::Time::kMicrosecondsPerSecond); |
+ } |
- // Account for silence ahead of the buffer closest to being played. |
- if (buffered_audio_[i].playback_rate == 0) { |
- silence_frames += frames; |
- continue; |
- } |
+ // Count up all |buffered_| audio based on |time_since_writing|. |
+ int64_t frames_played_since_writing = |
+ static_cast<int64_t>(time_since_writing.InSecondsF() * sample_rate_); |
+ for (size_t i = 0; i < buffered_.size() && frames_played_since_writing > 0; |
DaleCurtis
2014/08/02 00:51:33
You could cache this too and subtract off time_since_writing.
scherkus (not reviewing)
2014/08/02 01:55:24
Done.
DaleCurtis
2014/08/04 18:55:18
You said done, but didn't do this. Was that your intent?
|
+ ++i) { |
+ int64_t frames_played = |
+ std::min(buffered_[i].frames, frames_played_since_writing); |
+ current_timestamp += base::TimeDelta::FromMicroseconds( |
DaleCurtis
2014/08/02 00:51:32
Ditto.
DaleCurtis
2014/08/04 18:55:18
I meant break out the TimeDelta conversion like above.
|
+ (frames_played * buffered_[i].playback_rate) / sample_rate_ * |
+ base::Time::kMicrosecondsPerSecond); |
+ frames_played_since_writing -= frames_played; |
+ } |
+ |
+ return current_timestamp; |
+} |
+ |
+base::TimeDelta AudioClock::ContiguousAudioDataBuffered() const { |
+ base::TimeDelta buffered; |
+ for (size_t i = 0; i < buffered_.size(); ++i) { |
+ // Any buffered silence breaks our contiguous stretch of audio data. |
+ if (buffered_[i].playback_rate == 0) |
+ break; |
// Multiply by playback rate as frames represent time-scaled audio. |
- return buffered_audio_[i].endpoint_timestamp - |
- base::TimeDelta::FromMicroseconds( |
- ((frames * buffered_audio_[i].playback_rate) + silence_frames) / |
- sample_rate_ * base::Time::kMicrosecondsPerSecond); |
+ buffered += base::TimeDelta::FromMicroseconds( |
+ (buffered_[i].frames * buffered_[i].playback_rate) / sample_rate_ * |
+ base::Time::kMicrosecondsPerSecond); |
} |
- // Either: |
- // 1) AudioClock is uninitialziated and we'll return kNoTimestamp() |
- // 2) All previously buffered audio has been replaced by silence, |
- // meaning media time is now at the last endpoint |
- return last_endpoint_timestamp_; |
+ return buffered; |
} |
-void AudioClock::TrimBufferedAudioToMatchDelay(int delay_frames) { |
- if (buffered_audio_.empty()) |
- return; |
+base::TimeDelta AudioClock::ContiguousAudioDataBufferedAtSameRate() const { |
+ base::TimeDelta buffered; |
+ for (size_t i = 0; i < buffered_.size(); ++i) { |
+ // Any buffered silence breaks our contiguous stretch of audio data. |
+ if (buffered_[i].playback_rate == 0) |
+ break; |
- size_t i = buffered_audio_.size() - 1; |
- while (true) { |
- if (buffered_audio_[i].frames <= delay_frames) { |
- // Reached the end before accounting for all of |delay_frames|. This |
- // means we haven't written enough audio data yet to account for hardware |
- // delay. In this case, do nothing. |
- if (i == 0) |
- return; |
- |
- // Keep accounting for |delay_frames|. |
- delay_frames -= buffered_audio_[i].frames; |
- --i; |
- continue; |
+ // Multiply by playback rate as frames represent time-scaled audio. |
+ buffered = base::TimeDelta::FromMicroseconds( |
DaleCurtis
2014/08/02 00:51:33
Should this be += ? I don't understand why you have a loop here if you always break on the first iteration.
scherkus (not reviewing)
2014/08/02 01:55:24
Nah this was just silly. We always break so this doesn't matter.
|
+ (buffered_[i].frames * buffered_[i].playback_rate) / sample_rate_ * |
+ base::Time::kMicrosecondsPerSecond); |
+ |
+ if ((i + 1) < buffered_.size()) { |
+ DCHECK_NE(buffered_[i].playback_rate, buffered_[i + 1].playback_rate) |
+ << "Adjacent AudioData elements shouldn't have same playback rate"; |
} |
- // All of |delay_frames| has been accounted for: adjust amount of frames |
- // left in current buffer. All preceeding elements with index < |i| should |
- // be considered played out and hence discarded. |
- buffered_audio_[i].frames = delay_frames; |
break; |
} |
- // At this point |i| points at what will be the new head of |buffered_audio_| |
- // however if it contains no audio it should be removed as well. |
- if (buffered_audio_[i].frames == 0) |
- ++i; |
+ return buffered; |
+} |
- buffered_audio_.erase(buffered_audio_.begin(), buffered_audio_.begin() + i); |
+bool AudioClock::AudioDataBuffered() const { |
+ for (size_t i = 0; i < buffered_.size(); ++i) { |
+ if (buffered_[i].playback_rate != 0) { |
+ DCHECK_NE(buffered_[i].frames, 0) |
+ << "AudioData elements with zero frames shouldn't exist"; |
+ return true; |
+ } |
+ } |
+ return false; |
} |
-void AudioClock::PushBufferedAudio(int frames, |
- float playback_rate, |
- base::TimeDelta endpoint_timestamp) { |
- if (playback_rate == 0) |
- DCHECK(endpoint_timestamp == kNoTimestamp()); |
+AudioClock::AudioData::AudioData(int64_t frames, float playback_rate) |
DaleCurtis
2014/08/02 00:51:33
Up to you, but you can remove this and use a POD-type with aggregate initialization instead.
|
+ : frames(frames), playback_rate(playback_rate) { |
+} |
+// static |
+void AudioClock::PushAudioData(std::deque<AudioData>* audio_data, |
+ int64_t frames, |
+ float playback_rate) { |
if (frames == 0) |
return; |
// Avoid creating extra elements where possible. |
- if (!buffered_audio_.empty() && |
- buffered_audio_.back().playback_rate == playback_rate) { |
- buffered_audio_.back().frames += frames; |
- buffered_audio_.back().endpoint_timestamp = endpoint_timestamp; |
+ if (!audio_data->empty() && |
+ audio_data->back().playback_rate == playback_rate) { |
+ audio_data->back().frames += frames; |
return; |
} |
- buffered_audio_.push_back( |
- BufferedAudio(frames, playback_rate, endpoint_timestamp)); |
+ audio_data->push_back(AudioData(frames, playback_rate)); |
} |
-AudioClock::BufferedAudio::BufferedAudio(int frames, |
- float playback_rate, |
- base::TimeDelta endpoint_timestamp) |
- : frames(frames), |
- playback_rate(playback_rate), |
- endpoint_timestamp(endpoint_timestamp) { |
+// static |
+int64_t AudioClock::TotalFrames(const std::deque<AudioData>& audio_data) { |
+ int64_t total_frames = 0; |
+ for (size_t i = 0; i < audio_data.size(); ++i) { |
+ total_frames += audio_data[i].frames; |
+ } |
+ return total_frames; |
} |
} // namespace media |