Chromium Code Reviews| Index: media/filters/chunk_demuxer.cc |
| diff --git a/media/filters/chunk_demuxer.cc b/media/filters/chunk_demuxer.cc |
| index 1c875906ac7798b3022f0c51527c67911f9c4852..079bdce6f48cb341e307d6e4351571307a0591ab 100644 |
| --- a/media/filters/chunk_demuxer.cc |
| +++ b/media/filters/chunk_demuxer.cc |
| @@ -73,6 +73,11 @@ static const SupportedTypeInfo kSupportedTypeInfo[] = { |
| { "audio/mp4", &BuildMP4Parser, kAudioMP4Codecs }, |
| }; |
| + |
| +// The fake total size we use for converting times to bytes |
| +// for AddBufferedByteRange() calls. |
| +enum { kFakeTotalBytes = 1000000 }; |
|
Ami GONE FROM CHROMIUM
2012/06/19 17:40:37
TODO to drop this in favor of teaching Pipeline to … [comment truncated in capture]
acolwell GONE FROM CHROMIUM
2012/06/19 19:50:15
Done.
|
| + |
| // Checks to see if the specified |type| and |codecs| list are supported. |
| // Returns true if |type| and all codecs listed in |codecs| are supported. |
| // |factory_function| contains a function that can build a StreamParser |
| @@ -157,7 +162,7 @@ class ChunkDemuxerStream : public DemuxerStream { |
| bool Append(const StreamParser::BufferQueue& buffers); |
| // Returns a list of the buffered time ranges. |
| - SourceBufferStream::TimespanList GetBufferedTime() const; |
| + Ranges<base::TimeDelta> GetBufferedTime() const; |
| // Signal to the stream that buffers handed in through subsequent calls to |
| // Append() belong to a media segment that starts at |start_timestamp|. |
| @@ -290,7 +295,7 @@ bool ChunkDemuxerStream::Append(const StreamParser::BufferQueue& buffers) { |
| return true; |
| } |
| -SourceBufferStream::TimespanList ChunkDemuxerStream::GetBufferedTime() const { |
| +Ranges<base::TimeDelta> ChunkDemuxerStream::GetBufferedTime() const { |
| base::AutoLock auto_lock(lock_); |
| return stream_->GetBufferedTime(); |
| } |
| @@ -463,8 +468,7 @@ void ChunkDemuxerStream::CreateReadDoneClosures_Locked(ClosureQueue* closures) { |
| ChunkDemuxer::ChunkDemuxer(ChunkDemuxerClient* client) |
| : state_(WAITING_FOR_INIT), |
| host_(NULL), |
| - client_(client), |
| - buffered_bytes_(0) { |
| + client_(client) { |
| DCHECK(client); |
| } |
| @@ -627,99 +631,58 @@ void ChunkDemuxer::RemoveId(const std::string& id) { |
| video_->Shutdown(); |
| } |
| -bool ChunkDemuxer::GetBufferedRanges(const std::string& id, |
| - Ranges* ranges_out) const { |
| +Ranges<base::TimeDelta> ChunkDemuxer::GetBufferedRanges( |
| + const std::string& id) const { |
| DCHECK(!id.empty()); |
| DCHECK_GT(stream_parser_map_.count(id), 0u); |
| DCHECK(id == source_id_audio_ || id == source_id_video_); |
| - DCHECK(ranges_out); |
| base::AutoLock auto_lock(lock_); |
| if (id == source_id_audio_ && id != source_id_video_) { |
| // Only include ranges that have been buffered in |audio_| |
| - return CopyIntoRanges(audio_->GetBufferedTime(), ranges_out); |
| + return audio_->GetBufferedTime(); |
| } |
| if (id != source_id_audio_ && id == source_id_video_) { |
| // Only include ranges that have been buffered in |video_| |
| - return CopyIntoRanges(video_->GetBufferedTime(), ranges_out); |
| + return video_->GetBufferedTime(); |
| } |
| - // Include ranges that have been buffered in both |audio_| and |video_|. |
| - SourceBufferStream::TimespanList audio_ranges = audio_->GetBufferedTime(); |
| - SourceBufferStream::TimespanList video_ranges = video_->GetBufferedTime(); |
| - SourceBufferStream::TimespanList::const_iterator video_ranges_itr = |
| - video_ranges.begin(); |
| - SourceBufferStream::TimespanList::const_iterator audio_ranges_itr = |
| - audio_ranges.begin(); |
| - bool success = false; |
| - |
| - while (audio_ranges_itr != audio_ranges.end() && |
| - video_ranges_itr != video_ranges.end()) { |
| - // If this is the last range and EndOfStream() was called (i.e. all data |
| - // has been appended), choose the max end point of the ranges. |
| - bool last_range_after_ended = |
| - state_ == ENDED && |
| - (audio_ranges_itr + 1) == audio_ranges.end() && |
| - (video_ranges_itr + 1) == video_ranges.end(); |
| - |
| - // Audio range start time is within the video range. |
| - if ((*audio_ranges_itr).first >= (*video_ranges_itr).first && |
| - (*audio_ranges_itr).first <= (*video_ranges_itr).second) { |
| - AddIntersectionRange(*audio_ranges_itr, *video_ranges_itr, |
| - last_range_after_ended, ranges_out); |
| - audio_ranges_itr++; |
| - success = true; |
| - continue; |
| - } |
| - |
| - // Video range start time is within the audio range. |
| - if ((*video_ranges_itr).first >= (*audio_ranges_itr).first && |
| - (*video_ranges_itr).first <= (*audio_ranges_itr).second) { |
| - AddIntersectionRange(*video_ranges_itr, *audio_ranges_itr, |
| - last_range_after_ended, ranges_out); |
| - video_ranges_itr++; |
| - success = true; |
| - continue; |
| - } |
| + return ComputeIntersection(); |
| +} |
| - // No overlap was found. Increment the earliest one and keep looking. |
| - if ((*audio_ranges_itr).first < (*video_ranges_itr).first) |
| - audio_ranges_itr++; |
| - else |
| - video_ranges_itr++; |
| - } |
| +Ranges<base::TimeDelta> ChunkDemuxer::ComputeIntersection() const { |
| + lock_.AssertAcquired(); |
| - return success; |
| -} |
| + if (!audio_ || !video_) |
| + return Ranges<base::TimeDelta>(); |
| -bool ChunkDemuxer::CopyIntoRanges( |
| - const SourceBufferStream::TimespanList& timespans, |
| - Ranges* ranges_out) const { |
| - for (SourceBufferStream::TimespanList::const_iterator itr = timespans.begin(); |
| - itr != timespans.end(); ++itr) { |
| - ranges_out->push_back(*itr); |
| + // Include ranges that have been buffered in both |audio_| and |video_|. |
| + Ranges<base::TimeDelta> audio_ranges = audio_->GetBufferedTime(); |
| + Ranges<base::TimeDelta> video_ranges = video_->GetBufferedTime(); |
| + Ranges<base::TimeDelta> result = audio_ranges.IntersectionWith(video_ranges); |
| + |
| + if (state_ == ENDED) { |
| + // If appending has ended, extend the intersection to include all of the |
| + // data from the last ranges for each stream. This allows the buffered |
| + // information to match the actual time range that will get played out if |
| + // the streams have slightly different lengths. We only do this if the |
| + // final ranges overlap because this indicates there is actually a location |
| + // you can seek to, near the end, where we have data for both streams. |
| + base::TimeDelta audio_start = audio_ranges.start(audio_ranges.size() - 1); |
| + base::TimeDelta audio_end = audio_ranges.end(audio_ranges.size() - 1); |
| + base::TimeDelta video_start = video_ranges.start(video_ranges.size() - 1); |
| + base::TimeDelta video_end = video_ranges.end(video_ranges.size() - 1); |
| + |
| + if ((audio_start <= video_start && video_start <= audio_end) || |
| + (video_start <= audio_start && audio_start <= video_end)) { |
| + result.Add(std::max(audio_start, video_start), |
|
Ami GONE FROM CHROMIUM
2012/06/19 17:40:37
should this be a "min" instead of a max? Otherwise … [comment truncated in capture]
acolwell GONE FROM CHROMIUM
2012/06/19 19:50:15
I don't want min here. I only want the intersection … [comment truncated in capture]
|
| + std::max(audio_end, video_end)); |
| + } |
| } |
| - return !timespans.empty(); |
| -} |
| -void ChunkDemuxer::AddIntersectionRange( |
| - SourceBufferStream::Timespan timespan_a, |
| - SourceBufferStream::Timespan timespan_b, |
| - bool last_range_after_ended, |
| - Ranges* ranges_out) const { |
| - base::TimeDelta start = timespan_a.first; |
| - |
| - // If this is the last range after EndOfStream() was called, choose the later |
| - // end point of the ranges, otherwise choose the earlier. |
| - base::TimeDelta end; |
| - if (last_range_after_ended) |
| - end = std::max(timespan_a.second, timespan_b.second); |
| - else |
| - end = std::min(timespan_a.second, timespan_b.second); |
| - |
| - ranges_out->push_back(std::make_pair(start, end)); |
| + return result; |
| } |
| bool ChunkDemuxer::AppendData(const std::string& id, |
| @@ -731,7 +694,7 @@ bool ChunkDemuxer::AppendData(const std::string& id, |
| DCHECK(data); |
| DCHECK_GT(length, 0u); |
| - int64 buffered_bytes = 0; |
| + Ranges<base::TimeDelta> ranges; |
| PipelineStatusCB cb; |
| { |
| @@ -772,13 +735,27 @@ bool ChunkDemuxer::AppendData(const std::string& id, |
| std::swap(cb, seek_cb_); |
| } |
| - buffered_bytes_ += length; |
| - buffered_bytes = buffered_bytes_; |
| + if (duration_ > base::TimeDelta() && duration_ != kInfiniteDuration()) { |
| + if (audio_ && !video_) { |
| + ranges = audio_->GetBufferedTime(); |
| + } else if (!audio_ && video_) { |
| + ranges = video_->GetBufferedTime(); |
| + } else { |
| + ranges = ComputeIntersection(); |
| + } |
| + } |
| } |
| - // Notify the host of 'network activity' because we got data, using a bogus |
| - // range. |
| - host_->AddBufferedByteRange(0, buffered_bytes); |
| + for (size_t i = 0; i < ranges.size(); ++i) { |
| + DCHECK(duration_ > base::TimeDelta()); |
|
Ami GONE FROM CHROMIUM
2012/06/19 17:40:37
silly to do this inside the for-loop; move up as:
acolwell GONE FROM CHROMIUM
2012/06/19 19:50:15
Done. Used !ranges.size() since empty() doesn't exist … [comment truncated in capture]
|
| + |
| + // Notify the host of 'network activity' because we got data. |
| + int64 start = |
| + kFakeTotalBytes * ranges.start(i).InSecondsF() / duration_.InSecondsF(); |
| + int64 end = |
| + kFakeTotalBytes * ranges.end(i).InSecondsF() / duration_.InSecondsF(); |
| + host_->AddBufferedByteRange(start, end); |
| + } |
| if (!cb.is_null()) |
| cb.Run(PIPELINE_OK); |
| @@ -947,6 +924,8 @@ void ChunkDemuxer::OnStreamParserInitDone(bool success, |
| (!source_id_video_.empty() && !video_)) |
| return; |
| + if (duration_ > base::TimeDelta() && duration_ != kInfiniteDuration()) |
| + host_->SetTotalBytes(kFakeTotalBytes); |
| host_->SetDuration(duration_); |
| ChangeState_Locked(INITIALIZED); |