Index: media/filters/frame_processor.cc
diff --git a/media/filters/frame_processor.cc b/media/filters/frame_processor.cc
index e77e452fd8e184f0b5a8b06cad657afd9e5ff8a9..a648c1d1d7b72dfb7643684569cc3bb128139beb 100644
--- a/media/filters/frame_processor.cc
+++ b/media/filters/frame_processor.cc
@@ -161,6 +161,7 @@ bool MseTrackBuffer::FlushProcessedFrames() {
 FrameProcessor::FrameProcessor(const UpdateDurationCB& update_duration_cb,
                                const scoped_refptr<MediaLog>& media_log)
     : group_start_timestamp_(kNoTimestamp()),
+      last_processed_decode_timestamp_(kNoDecodeTimestamp()),
       update_duration_cb_(update_duration_cb),
       media_log_(media_log) {
   DVLOG(2) << __FUNCTION__ << "()";
@@ -194,7 +195,6 @@ bool FrameProcessor::ProcessFrames(
     const StreamParser::TextBufferQueueMap& text_map,
     base::TimeDelta append_window_start,
     base::TimeDelta append_window_end,
-    bool* new_media_segment,
     base::TimeDelta* timestamp_offset) {
   StreamParser::BufferQueue frames;
   if (!MergeBufferQueues(audio_buffers, video_buffers, text_map, &frames)) {
@@ -214,7 +214,7 @@ bool FrameProcessor::ProcessFrames(
   for (StreamParser::BufferQueue::const_iterator frames_itr = frames.begin();
        frames_itr != frames.end(); ++frames_itr) {
     if (!ProcessFrame(*frames_itr, append_window_start, append_window_end,
-                      timestamp_offset, new_media_segment)) {
+                      timestamp_offset)) {
       FlushProcessedFrames();
       return false;
     }
@@ -287,6 +287,10 @@ void FrameProcessor::Reset() {
        itr != track_buffers_.end(); ++itr) {
     itr->second->Reset();
   }
+
+  last_processed_decode_timestamp_ = kNoDecodeTimestamp();
+  in_coded_frame_group_ =
+      false;  // BIG TODO: Does this need to be conditioned on appendMode?
 }
 
 void FrameProcessor::OnPossibleAudioConfigUpdate(
@@ -312,14 +316,14 @@ MseTrackBuffer* FrameProcessor::FindTrack(StreamParser::TrackId id) {
   return itr->second;
 }
 
-void FrameProcessor::NotifyNewMediaSegmentStarting(
-    DecodeTimestamp segment_timestamp) {
-  DVLOG(2) << __FUNCTION__ << "(" << segment_timestamp.InSecondsF() << ")";
+void FrameProcessor::NotifyStartOfCodedFrameGroup(
+    DecodeTimestamp start_timestamp) {
+  DVLOG(2) << __FUNCTION__ << "(" << start_timestamp.InSecondsF() << ")";
 
   for (TrackBufferMap::iterator itr = track_buffers_.begin();
        itr != track_buffers_.end();
        ++itr) {
-    itr->second->stream()->OnNewMediaSegment(segment_timestamp);
+    itr->second->stream()->OnStartOfCodedFrameGroup(start_timestamp);
   }
 }
 
@@ -440,8 +444,7 @@ bool FrameProcessor::ProcessFrame(
     const scoped_refptr<StreamParserBuffer>& frame,
     base::TimeDelta append_window_start,
     base::TimeDelta append_window_end,
-    base::TimeDelta* timestamp_offset,
-    bool* new_media_segment) {
+    base::TimeDelta* timestamp_offset) {
   // Implements the loop within step 1 of the coded frame processing algorithm
   // for a single input frame per April 1, 2014 MSE spec editor's draft:
   // https://dvcs.w3.org/hg/html-media/raw-file/d471a4412040/media-source/
@@ -581,6 +584,10 @@ bool FrameProcessor::ProcessFrame(
     // times last frame duration:
     DecodeTimestamp last_decode_timestamp =
         track_buffer->last_decode_timestamp();
+    // BIG TODO: Continue here, using last_processed_decode_timestamp_,
+    // conditionally resetting in_coded_frame_group_, and investigating how
+    // common single-stream-cluster appends are in mp4 or other muxed byte
+    // stream formats.
     if (last_decode_timestamp != kNoDecodeTimestamp()) {
       base::TimeDelta dts_delta = decode_timestamp - last_decode_timestamp;
       if (dts_delta < base::TimeDelta() ||
@@ -593,8 +600,8 @@ bool FrameProcessor::ProcessFrame(
           group_end_timestamp_ = presentation_timestamp;
           // This triggers a discontinuity so we need to treat the next frames
           // appended within the append window as if they were the beginning of
-          // a new segment.
-          *new_media_segment = true;
+          // a new coded frame group.
+          in_coded_frame_group_ = false;
         } else {
           DVLOG(3) << __FUNCTION__ << " : Sequence mode discontinuity, GETS: "
                    << group_end_timestamp_.InSecondsF();
@@ -684,19 +691,19 @@ bool FrameProcessor::ProcessFrame(
     }
 
     // We now have a processed buffer to append to the track buffer's stream.
-    // If it is the first in a new media segment or following a discontinuity,
-    // notify all the track buffers' streams that a new segment is beginning.
-    if (*new_media_segment) {
-      // First, complete the append to track buffer streams of previous media
-      // segment's frames, if any.
+    // If it is the first in a new coded frame group (such as following a
+    // discontinuity), notify all the track buffers' streams that a coded frame
+    // group is starting.
+    if (!in_coded_frame_group_) {
+      // First, complete the append to track buffer streams of the previous
+      // coded frame group's frames, if any.
       if (!FlushProcessedFrames())
         return false;
 
-      *new_media_segment = false;
-
       // TODO(acolwell/wolenetz): This should be changed to a presentation
       // timestamp. See http://crbug.com/402502
-      NotifyNewMediaSegmentStarting(decode_timestamp);
+      NotifyStartOfCodedFrameGroup(decode_timestamp);
+      in_coded_frame_group_ = true;
     }
 
     DVLOG(3) << __FUNCTION__ << ": Sending processed frame to stream, "
@@ -705,7 +712,7 @@
 
     // Steps 13-18: Note, we optimize by appending groups of contiguous
    // processed frames for each track buffer at end of ProcessFrames() or prior
-    // to NotifyNewMediaSegmentStarting().
+    // to NotifyStartOfCodedFrameGroup().
     // TODO(wolenetz): Refactor SourceBufferStream to conform to spec GC timing.
     // See http://crbug.com/371197.
     track_buffer->EnqueueProcessedFrame(frame);
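
The net effect of dropping the new_media_segment out-parameter above is that the frame processor itself now remembers whether it is inside a coded frame group: in_coded_frame_group_ is cleared by Reset() or a detected decode-timestamp discontinuity, and set again the first time a processed frame triggers the start-of-group notification. The standalone C++ sketch below illustrates only that state-machine shape; MiniFrameProcessor, OnStartOfGroup(), the millisecond timestamps, and the gap threshold are invented for this example and are not the Chromium types or API.

// Illustrative sketch only -- not the Chromium implementation. The class,
// method names, and constants below are invented for this example.
#include <iostream>

class MiniFrameProcessor {
 public:
  // Processes one frame identified by its decode timestamp (milliseconds).
  bool ProcessFrame(int decode_timestamp_ms) {
    // A backward jump or a large forward gap in decode time is treated as a
    // discontinuity: the current coded frame group ends here.
    if (last_decode_timestamp_ms_ != kNoTimestamp &&
        (decode_timestamp_ms < last_decode_timestamp_ms_ ||
         decode_timestamp_ms - last_decode_timestamp_ms_ > kMaxGapMs)) {
      in_coded_frame_group_ = false;
    }

    // The first frame after construction, Reset(), or a discontinuity starts
    // a new coded frame group; notify exactly once per group.
    if (!in_coded_frame_group_) {
      OnStartOfGroup(decode_timestamp_ms);
      in_coded_frame_group_ = true;
    }

    last_decode_timestamp_ms_ = decode_timestamp_ms;
    return true;
  }

  // Clears per-group state, mirroring how the patch resets
  // in_coded_frame_group_ and last_processed_decode_timestamp_ in Reset().
  void Reset() {
    last_decode_timestamp_ms_ = kNoTimestamp;
    in_coded_frame_group_ = false;
  }

 private:
  static const int kNoTimestamp = -1;
  static const int kMaxGapMs = 100;  // Arbitrary discontinuity threshold.

  void OnStartOfGroup(int start_timestamp_ms) {
    std::cout << "Start of coded frame group at " << start_timestamp_ms
              << " ms\n";
  }

  int last_decode_timestamp_ms_ = kNoTimestamp;
  bool in_coded_frame_group_ = false;
};

int main() {
  MiniFrameProcessor processor;
  processor.ProcessFrame(0);    // Starts the first group (prints once).
  processor.ProcessFrame(33);   // Same group, no new notification.
  processor.ProcessFrame(500);  // Large DTS gap: starts a new group.
  processor.Reset();
  processor.ProcessFrame(600);  // After Reset(): starts a new group.
  return 0;
}

In the actual patch the discontinuity check compares the DTS delta against two times the last frame duration, and sequence mode adjusts group timestamps rather than simply ending the group; the sketch keeps only the member-flag handling that replaces *new_media_segment.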