Chromium Code Reviews

Side by Side Diff: media/filters/frame_processor.cc

Issue 180153003: Implement core of compliant MediaSource coded frame processing (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Rebase to ToT in preparation for dcommit Created 6 years, 7 months ago
1 // Copyright 2014 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "media/filters/frame_processor.h"
6
7 #include "base/stl_util.h"
8 #include "media/base/buffers.h"
9 #include "media/base/stream_parser_buffer.h"
10
11 namespace media {
12
13 FrameProcessor::FrameProcessor(const UpdateDurationCB& update_duration_cb)
14 : update_duration_cb_(update_duration_cb) {
15 DVLOG(2) << __FUNCTION__ << "()";
16 DCHECK(!update_duration_cb.is_null());
17 }
18
19 FrameProcessor::~FrameProcessor() {
20 DVLOG(2) << __FUNCTION__;
21 }
22
23 void FrameProcessor::SetSequenceMode(bool sequence_mode) {
24 DVLOG(2) << __FUNCTION__ << "(" << sequence_mode << ")";
25
26 // Per April 1, 2014 MSE spec editor's draft:
27 // https://dvcs.w3.org/hg/html-media/raw-file/d471a4412040/media-source/media-source.html#widl-SourceBuffer-mode
28 // Step 7: If the new mode equals "sequence", then set the group start
29 // timestamp to the group end timestamp.
30 if (sequence_mode) {
31 DCHECK(kNoTimestamp() != group_end_timestamp_);
32 group_start_timestamp_ = group_end_timestamp_;
33 }
34
35 // Step 8: Update the attribute to new mode.
36 sequence_mode_ = sequence_mode;
37 }
38
39 bool FrameProcessor::ProcessFrames(
40 const StreamParser::BufferQueue& audio_buffers,
41 const StreamParser::BufferQueue& video_buffers,
42 const StreamParser::TextBufferQueueMap& text_map,
43 base::TimeDelta append_window_start,
44 base::TimeDelta append_window_end,
45 bool* new_media_segment,
46 base::TimeDelta* timestamp_offset) {
47 StreamParser::BufferQueue frames;
48 if (!MergeBufferQueues(audio_buffers, video_buffers, text_map, &frames)) {
49 DVLOG(2) << "Parse error discovered while merging parser's buffers";
50 return false;
51 }
52
53 DCHECK(!frames.empty());
54
55 // Implements the coded frame processing algorithm's outer loop for step 1.
56 // Note that ProcessFrame() implements an inner loop for a single frame that
57 // handles "jump to the Loop Top step to restart processing of the current
58 // coded frame" per April 1, 2014 MSE spec editor's draft:
59 // https://dvcs.w3.org/hg/html-media/raw-file/d471a4412040/media-source/
60 // media-source.html#sourcebuffer-coded-frame-processing
61 // 1. For each coded frame in the media segment run the following steps:
62 for (StreamParser::BufferQueue::const_iterator frames_itr = frames.begin();
63 frames_itr != frames.end(); ++frames_itr) {
64 if (!ProcessFrame(*frames_itr, append_window_start, append_window_end,
65 timestamp_offset, new_media_segment)) {
66 return false;
67 }
68 }
69
70 // 2. - 4. Are handled by the WebMediaPlayer / Pipeline / Media Element.
71
72 // Step 5:
73 update_duration_cb_.Run(group_end_timestamp_);
74
75 return true;
76 }
77
78 bool FrameProcessor::ProcessFrame(scoped_refptr<StreamParserBuffer> frame,
79 base::TimeDelta append_window_start,
80 base::TimeDelta append_window_end,
81 base::TimeDelta* timestamp_offset,
82 bool* new_media_segment) {
83 // Implements the loop within step 1 of the coded frame processing algorithm
84 // for a single input frame per April 1, 2014 MSE spec editor's draft:
85 // https://dvcs.w3.org/hg/html-media/raw-file/d471a4412040/media-source/
86 // media-source.html#sourcebuffer-coded-frame-processing
87
88 while (true) {
89 // 1. Loop Top: Let presentation timestamp be a double precision floating
90 // point representation of the coded frame's presentation timestamp in
91 // seconds.
92 // 2. Let decode timestamp be a double precision floating point
93 // representation of the coded frame's decode timestamp in seconds.
94 // 3. Let frame duration be a double precision floating point representation
95 // of the coded frame's duration in seconds.
96 // We use base::TimeDelta instead of double.
97 base::TimeDelta presentation_timestamp = frame->timestamp();
98 base::TimeDelta decode_timestamp = frame->GetDecodeTimestamp();
99 base::TimeDelta frame_duration = frame->duration();
100
101 DVLOG(3) << __FUNCTION__ << ": Processing frame "
102 << "Type=" << frame->type()
103 << ", TrackID=" << frame->track_id()
104 << ", PTS=" << presentation_timestamp.InSecondsF()
105 << ", DTS=" << decode_timestamp.InSecondsF()
106 << ", DUR=" << frame_duration.InSecondsF();
107
108 // Sanity check the timestamps.
109 if (presentation_timestamp < base::TimeDelta()) {
110 DVLOG(2) << __FUNCTION__ << ": Negative or unknown frame PTS: "
111 << presentation_timestamp.InSecondsF();
112 return false;
113 }
114 if (decode_timestamp < base::TimeDelta()) {
115 DVLOG(2) << __FUNCTION__ << ": Negative or unknown frame DTS: "
116 << decode_timestamp.InSecondsF();
117 return false;
118 }
119 if (decode_timestamp > presentation_timestamp) {
120 // TODO(wolenetz): Determine whether DTS>PTS should really be allowed. See
121 // http://crbug.com/354518.
122 DVLOG(2) << __FUNCTION__ << ": WARNING: Frame DTS("
123 << decode_timestamp.InSecondsF() << ") > PTS("
124 << presentation_timestamp.InSecondsF() << ")";
125 }
126
127 // TODO(acolwell/wolenetz): All stream parsers must emit valid (positive)
128 // frame durations. For now, we allow non-negative frame duration.
129 // See http://crbug.com/351166.
130 if (frame_duration == kNoTimestamp()) {
131 DVLOG(2) << __FUNCTION__ << ": Frame missing duration (kNoTimestamp())";
132 return false;
133 }
134 if (frame_duration < base::TimeDelta()) {
135 DVLOG(2) << __FUNCTION__ << ": Frame duration negative: "
136 << frame_duration.InSecondsF();
137 return false;
138 }
139
140 // 4. If mode equals "sequence" and group start timestamp is set, then run
141 // the following steps:
142 if (sequence_mode_ && group_start_timestamp_ != kNoTimestamp()) {
143 // 4.1. Set timestampOffset equal to group start timestamp -
144 // presentation timestamp.
145 *timestamp_offset = group_start_timestamp_ - presentation_timestamp;
146
147 DVLOG(3) << __FUNCTION__ << ": updated timestampOffset is now "
148 << timestamp_offset->InSecondsF();
149
150 // 4.2. Set group end timestamp equal to group start timestamp.
151 group_end_timestamp_ = group_start_timestamp_;
152
153 // 4.3. Set the need random access point flag on all track buffers to
154 // true.
155 SetAllTrackBuffersNeedRandomAccessPoint();
156
157 // 4.4. Unset group start timestamp.
158 group_start_timestamp_ = kNoTimestamp();
159 }
160
161 // 5. If timestampOffset is not 0, then run the following steps:
162 if (*timestamp_offset != base::TimeDelta()) {
163 // 5.1. Add timestampOffset to the presentation timestamp.
164 // Note: |frame| PTS is only updated if it survives processing.
165 presentation_timestamp += *timestamp_offset;
166
167 // 5.2. Add timestampOffset to the decode timestamp.
168 // Frame DTS is only updated if it survives processing.
169 decode_timestamp += *timestamp_offset;
170 }
171
172 // 6. Let track buffer equal the track buffer that the coded frame will be
173 // added to.
174
175 // Remap audio and video track types to their special singleton identifiers.
176 StreamParser::TrackId track_id = kAudioTrackId;
177 switch (frame->type()) {
178 case DemuxerStream::AUDIO:
179 break;
180 case DemuxerStream::VIDEO:
181 track_id = kVideoTrackId;
182 break;
183 case DemuxerStream::TEXT:
184 track_id = frame->track_id();
185 break;
186 case DemuxerStream::UNKNOWN:
187 case DemuxerStream::NUM_TYPES:
188 DCHECK(false) << ": Invalid frame type " << frame->type();
189 return false;
190 }
191
192 MseTrackBuffer* track_buffer = FindTrack(track_id);
193 if (!track_buffer) {
194 DVLOG(2) << __FUNCTION__ << ": Unknown track: type=" << frame->type()
195 << ", frame processor track id=" << track_id
196 << ", parser track id=" << frame->track_id();
197 return false;
198 }
199
200 // 7. If last decode timestamp for track buffer is set and decode timestamp
201 // is less than last decode timestamp
202 // OR
203 // If last decode timestamp for track buffer is set and the difference
204 // between decode timestamp and last decode timestamp is greater than 2
205 // times last frame duration:
206 base::TimeDelta last_decode_timestamp =
207 track_buffer->last_decode_timestamp();
208 if (last_decode_timestamp != kNoTimestamp()) {
209 base::TimeDelta dts_delta = decode_timestamp - last_decode_timestamp;
210 if (dts_delta < base::TimeDelta() ||
211 dts_delta > 2 * track_buffer->last_frame_duration()) {
212 // 7.1. If mode equals "segments": Set group end timestamp to
213 // presentation timestamp.
214 // If mode equals "sequence": Set group start timestamp equal to
215 // the group end timestamp.
216 if (!sequence_mode_) {
217 group_end_timestamp_ = presentation_timestamp;
218 // This triggers a discontinuity so we need to treat the next frames
219 // appended within the append window as if they were the beginning of
220 // a new segment.
221 *new_media_segment = true;
222 } else {
223 DVLOG(3) << __FUNCTION__ << " : Sequence mode discontinuity, GETS: "
224 << group_end_timestamp_.InSecondsF();
225 DCHECK(kNoTimestamp() != group_end_timestamp_);
226 group_start_timestamp_ = group_end_timestamp_;
227 }
228
229 // 7.2. - 7.5.:
230 Reset();
231
232 // 7.6. Jump to the Loop Top step above to restart processing of the
233 // current coded frame.
234 DVLOG(3) << __FUNCTION__ << ": Discontinuity: reprocessing frame";
235 continue;
236 }
237 }
238
239 // 8. If the presentation timestamp or decode timestamp is less than the
240 // presentation start time, then run the end of stream algorithm with the
241 // error parameter set to "decode", and abort these steps.
242 if (presentation_timestamp < base::TimeDelta() ||
243 decode_timestamp < base::TimeDelta()) {
244 DVLOG(2) << __FUNCTION__
245 << ": frame PTS=" << presentation_timestamp.InSecondsF()
246 << " or DTS=" << decode_timestamp.InSecondsF()
247 << " negative after applying timestampOffset and handling any "
248 << " discontinuity";
249 return false;
250 }
251
252 // 9. Let frame end timestamp equal the sum of presentation timestamp and
253 // frame duration.
254 base::TimeDelta frame_end_timestamp = presentation_timestamp +
255 frame_duration;
256
257 // 10. If presentation timestamp is less than appendWindowStart, then set
258 // the need random access point flag to true, drop the coded frame, and
259 // jump to the top of the loop to start processing the next coded
260 // frame.
261 // Note: We keep the result of partial discard of a buffer that overlaps
262 // |append_window_start| and does not end after |append_window_end|.
263 // 11. If frame end timestamp is greater than appendWindowEnd, then set the
264 // need random access point flag to true, drop the coded frame, and jump
265 // to the top of the loop to start processing the next coded frame.
266 if (presentation_timestamp < append_window_start ||
267 frame_end_timestamp > append_window_end) {
268 // See if a partial discard can be done around |append_window_start|.
269 // TODO(wolenetz): Refactor this into a base helper across legacy and
270 // new frame processors?
271 if (track_buffer->stream()->supports_partial_append_window_trimming() &&
272 presentation_timestamp < append_window_start &&
273 frame_end_timestamp > append_window_start &&
274 frame_end_timestamp <= append_window_end) {
275 DCHECK(frame->IsKeyframe());
276 DVLOG(1) << "Truncating buffer which overlaps append window start."
277 << " presentation_timestamp "
278 << presentation_timestamp.InSecondsF()
279 << " append_window_start " << append_window_start.InSecondsF();
280
281 // Adjust the timestamp of this frame forward to |append_window_start|,
282 // while decreasing the duration appropriately.
283 frame->set_discard_padding(std::make_pair(
284 append_window_start - presentation_timestamp, base::TimeDelta()));
285 presentation_timestamp = append_window_start; // |frame| updated below.
286 decode_timestamp = append_window_start; // |frame| updated below.
287 frame_duration = frame_end_timestamp - presentation_timestamp;
288 frame->set_duration(frame_duration);
289
290 // TODO(dalecurtis): This could also be done with |append_window_end|,
291 // but is not necessary since splice frames cover the overlap there.
292 } else {
293 track_buffer->set_needs_random_access_point(true);
294 DVLOG(3) << "Dropping frame that is outside append window.";
295
296 if (!sequence_mode_) {
297 // This also triggers a discontinuity so we need to treat the next
298 // frames appended within the append window as if they were the
299 // beginning of a new segment.
300 *new_media_segment = true;
301 }
302
303 return true;
304 }
305 }
306
307 // 12. If the need random access point flag on track buffer equals true,
308 // then run the following steps:
309 if (track_buffer->needs_random_access_point()) {
310 // 12.1. If the coded frame is not a random access point, then drop the
311 // coded frame and jump to the top of the loop to start processing
312 // the next coded frame.
313 if (!frame->IsKeyframe()) {
314 DVLOG(3) << __FUNCTION__
315 << ": Dropping frame that is not a random access point";
316 return true;
317 }
318
319 // 12.2. Set the need random access point flag on track buffer to false.
320 track_buffer->set_needs_random_access_point(false);
321 }
322
323 // We now have a processed buffer to append to the track buffer's stream.
324 // If it is the first in a new media segment or following a discontinuity,
325 // notify all the track buffers' streams that a new segment is beginning.
326 if (*new_media_segment) {
327 *new_media_segment = false;
328 NotifyNewMediaSegmentStarting(decode_timestamp);
329 }
330
331 DVLOG(3) << __FUNCTION__ << ": Sending processed frame to stream, "
332 << "PTS=" << presentation_timestamp.InSecondsF()
333 << ", DTS=" << decode_timestamp.InSecondsF();
334 frame->set_timestamp(presentation_timestamp);
335 frame->SetDecodeTimestamp(decode_timestamp);
336
337 // Steps 13-18:
338 // TODO(wolenetz): Collect and emit more than one buffer at a time, if
339 // possible. Also refactor SourceBufferStream to conform to spec GC timing.
340 // See http://crbug.com/371197.
341 StreamParser::BufferQueue buffer_to_append;
342 buffer_to_append.push_back(frame);
343 track_buffer->stream()->Append(buffer_to_append);
344
345 // 19. Set last decode timestamp for track buffer to decode timestamp.
346 track_buffer->set_last_decode_timestamp(decode_timestamp);
347
348 // 20. Set last frame duration for track buffer to frame duration.
349 track_buffer->set_last_frame_duration(frame_duration);
350
351 // 21. If highest presentation timestamp for track buffer is unset or frame
352 // end timestamp is greater than highest presentation timestamp, then
353 // set highest presentation timestamp for track buffer to frame end
354 // timestamp.
355 track_buffer->SetHighestPresentationTimestampIfIncreased(
356 frame_end_timestamp);
357
358 // 22. If frame end timestamp is greater than group end timestamp, then set
359 // group end timestamp equal to frame end timestamp.
360 DCHECK(group_end_timestamp_ >= base::TimeDelta());
361 if (frame_end_timestamp > group_end_timestamp_)
362 group_end_timestamp_ = frame_end_timestamp;
363
364 return true;
365 }
366
367 NOTREACHED();
368 return false;
369 }
370
371 void FrameProcessor::SetAllTrackBuffersNeedRandomAccessPoint() {
372 for (TrackBufferMap::iterator itr = track_buffers_.begin();
373 itr != track_buffers_.end(); ++itr) {
374 itr->second->set_needs_random_access_point(true);
375 }
376 }
377
378 } // namespace media
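
Not part of the patch: a minimal caller sketch showing the ProcessFrames() contract in one place. The helper name ExampleAppend, the append-window values, and the surrounding wiring are hypothetical; only the FrameProcessor methods and parameter types come from the file above (kInfiniteDuration() is from media/base/buffers.h, already included there).

// Hypothetical usage sketch only; nothing below is added by this change.
#include "media/base/buffers.h"
#include "media/filters/frame_processor.h"

namespace media {

void ExampleAppend(FrameProcessor* processor,
                   const StreamParser::BufferQueue& audio_buffers,
                   const StreamParser::BufferQueue& video_buffers,
                   const StreamParser::TextBufferQueueMap& text_map) {
  // "segments" is the default mode; passing true instead would make step 4
  // of the algorithm derive timestampOffset from the group start timestamp.
  processor->SetSequenceMode(false);

  bool new_media_segment = true;        // First append of a new media segment.
  base::TimeDelta timestamp_offset;     // Read; updated in sequence mode.
  base::TimeDelta append_window_start;  // Defaults to zero.
  base::TimeDelta append_window_end = kInfiniteDuration();

  if (!processor->ProcessFrames(audio_buffers, video_buffers, text_map,
                                append_window_start, append_window_end,
                                &new_media_segment, &timestamp_offset)) {
    // A false return corresponds to the algorithm's error paths above,
    // e.g. a negative PTS/DTS or a frame with no duration.
  }
}

}  // namespace media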