Chromium Code Reviews

Diff: media/filters/frame_processor.cc

Issue 2064073004: MSE: Update FrameProcessor comments w.r.t. current spec (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Created 4 years, 6 months ago
 // Copyright 2014 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "media/filters/frame_processor.h"

 #include <stdint.h>

 #include <cstdlib>

(...skipping 159 matching lines...)
   DCHECK(!update_duration_cb.is_null());
 }

 FrameProcessor::~FrameProcessor() {
   DVLOG(2) << __FUNCTION__ << "()";
   STLDeleteValues(&track_buffers_);
 }

 void FrameProcessor::SetSequenceMode(bool sequence_mode) {
   DVLOG(2) << __FUNCTION__ << "(" << sequence_mode << ")";
-
-  // Per April 1, 2014 MSE spec editor's draft:
-  // https://dvcs.w3.org/hg/html-media/raw-file/d471a4412040/media-source/media-source.html#widl-SourceBuffer-mode
+  // Per June 9, 2016 MSE spec editor's draft:
+  // https://rawgit.com/w3c/media-source/d8f901f22/
+  //   index.html#widl-SourceBuffer-mode
   // Step 7: If the new mode equals "sequence", then set the group start
   //         timestamp to the group end timestamp.
   if (sequence_mode) {
     DCHECK(kNoTimestamp() != group_end_timestamp_);
     group_start_timestamp_ = group_end_timestamp_;
   }

   // Step 8: Update the attribute to new mode.
   sequence_mode_ = sequence_mode;
 }

 bool FrameProcessor::ProcessFrames(
     const StreamParser::BufferQueue& audio_buffers,
     const StreamParser::BufferQueue& video_buffers,
     const StreamParser::TextBufferQueueMap& text_map,
     base::TimeDelta append_window_start,
     base::TimeDelta append_window_end,
     base::TimeDelta* timestamp_offset) {
   StreamParser::BufferQueue frames;
   if (!MergeBufferQueues(audio_buffers, video_buffers, text_map, &frames)) {
     MEDIA_LOG(ERROR, media_log_) << "Parsed buffers not in DTS sequence";
     return false;
   }

   DCHECK(!frames.empty());

   // Implements the coded frame processing algorithm's outer loop for step 1.
   // Note that ProcessFrame() implements an inner loop for a single frame that
   // handles "jump to the Loop Top step to restart processing of the current
-  // coded frame" per April 1, 2014 MSE spec editor's draft:
-  // https://dvcs.w3.org/hg/html-media/raw-file/d471a4412040/media-source/
+  // coded frame" per June 9, 2016 MSE spec editor's draft:
+  // https://rawgit.com/w3c/media-source/d8f901f22/
chcunningham 2016/06/15 18:26:21 I noticed these old links are completely busted. D
wolenetz 2016/06/15 18:31:31 They're better. Permanent they may not be (depends
-  // media-source.html#sourcebuffer-coded-frame-processing
+  //   index.html#sourcebuffer-coded-frame-processing
   // 1. For each coded frame in the media segment run the following steps:
   for (StreamParser::BufferQueue::const_iterator frames_itr = frames.begin();
        frames_itr != frames.end(); ++frames_itr) {
     if (!ProcessFrame(*frames_itr, append_window_start, append_window_end,
                       timestamp_offset)) {
       FlushProcessedFrames();
       return false;
     }
   }

   if (!FlushProcessedFrames())
     return false;

   // 2. - 4. Are handled by the WebMediaPlayer / Pipeline / Media Element.

-  // Step 5:
+  // 5. If the media segment contains data beyond the current duration, then run
+  //    the duration change algorithm with new duration set to the maximum of
+  //    the current duration and the group end timestamp.
   update_duration_cb_.Run(group_end_timestamp_);

   return true;
 }

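The renumbered Step 5 comment above delegates the duration change algorithm to |update_duration_cb_| rather than running it inside FrameProcessor. A minimal sketch of what a callback owner could do with the group end timestamp, assuming a hypothetical owner class and plain microsecond integers in place of base::TimeDelta (illustrative only; the real callback is whatever the creator of FrameProcessor bound in):

#include <cstdint>

// Illustrative sketch only: HypotheticalDurationOwner, current_duration_us_,
// and SetMediaElementDuration() are stand-ins, not Chromium APIs.
class HypotheticalDurationOwner {
 public:
  // Invoked in the spirit of update_duration_cb_.Run(group_end_timestamp_).
  void OnUpdateDuration(int64_t group_end_timestamp_us) {
    // Duration change algorithm, as quoted in the Step 5 comment: the new
    // duration is the maximum of the current duration and the group end
    // timestamp, so an append can only grow the duration from here.
    if (group_end_timestamp_us > current_duration_us_) {
      current_duration_us_ = group_end_timestamp_us;
      SetMediaElementDuration(current_duration_us_);
    }
  }

 private:
  void SetMediaElementDuration(int64_t /*duration_us*/) {
    // Would propagate the new duration to the attached media element.
  }
  int64_t current_duration_us_ = 0;
};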
 void FrameProcessor::SetGroupStartTimestampIfInSequenceMode(
     base::TimeDelta timestamp_offset) {
   DVLOG(2) << __FUNCTION__ << "(" << timestamp_offset.InSecondsF() << ")";
   DCHECK(kNoTimestamp() != timestamp_offset);
   if (sequence_mode_)
(...skipping 207 matching lines...)

   return processed_buffer;
 }

 bool FrameProcessor::ProcessFrame(
     const scoped_refptr<StreamParserBuffer>& frame,
     base::TimeDelta append_window_start,
     base::TimeDelta append_window_end,
     base::TimeDelta* timestamp_offset) {
   // Implements the loop within step 1 of the coded frame processing algorithm
-  // for a single input frame per April 1, 2014 MSE spec editor's draft:
-  // https://dvcs.w3.org/hg/html-media/raw-file/d471a4412040/media-source/
-  // media-source.html#sourcebuffer-coded-frame-processing
-
+  // for a single input frame per June 9, 2016 MSE spec editor's draft:
+  // https://rawgit.com/w3c/media-source/d8f901f22/
+  //   index.html#sourcebuffer-coded-frame-processing
   while (true) {
-    // 1. Loop Top: Let presentation timestamp be a double precision floating
-    //    point representation of the coded frame's presentation timestamp in
-    //    seconds.
-    // 2. Let decode timestamp be a double precision floating point
-    //    representation of the coded frame's decode timestamp in seconds.
-    // 3. Let frame duration be a double precision floating point representation
+    // 1. Loop Top:
+    // Otherwise case: (See MediaSourceState's |auto_update_timestamp_offset_|,
chcunningham 2016/06/15 18:26:21 Am I right that "generate timestamps" case is hand
wolenetz 2016/06/15 18:31:31 Correct (in conjunction with MediaSourceState logi
+    //    too).
+    // 1.1. Let presentation timestamp be a double precision floating point
+    //      representation of the coded frame's presentation timestamp in
+    //      seconds.
+    // 1.2. Let decode timestamp be a double precision floating point
+    //      representation of the coded frame's decode timestamp in seconds.
+    // 2. Let frame duration be a double precision floating point representation
     //    of the coded frame's duration in seconds.
     // We use base::TimeDelta and DecodeTimestamp instead of double.
     base::TimeDelta presentation_timestamp = frame->timestamp();
     DecodeTimestamp decode_timestamp = frame->GetDecodeTimestamp();
     base::TimeDelta frame_duration = frame->duration();

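The review exchange above asks where the spec's "generate timestamps" branch of step 1 is handled; per the reply, it is resolved upstream of FrameProcessor (in conjunction with MediaSourceState's |auto_update_timestamp_offset_|), so only the "Otherwise" branch appears here. A sketch of the two branches of spec step 1, using a hypothetical CodedFrame struct and plain doubles instead of base::TimeDelta/DecodeTimestamp (an illustration of the spec step, not the Chromium implementation):

// Hypothetical types for illustration; not Chromium classes.
struct CodedFrame {
  double pts_seconds;
  double dts_seconds;
};

struct LoopTopTimestamps {
  double presentation_timestamp;
  double decode_timestamp;
};

// Spec step 1 ("Loop Top"): pick the starting timestamps for the frame.
LoopTopTimestamps RunLoopTopStep(const CodedFrame& frame,
                                 bool generate_timestamps_flag) {
  if (generate_timestamps_flag) {
    // "If generate timestamps flag equals true": both timestamps start at 0;
    // in this codebase that case is resolved before frames reach
    // FrameProcessor, per the review reply above.
    return {0.0, 0.0};
  }
  // "Otherwise" branch, mirrored by comment steps 1.1 and 1.2 above.
  return {frame.pts_seconds, frame.dts_seconds};
}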
     DVLOG(3) << __FUNCTION__ << ": Processing frame "
              << "Type=" << frame->type()
              << ", TrackID=" << frame->track_id()
              << ", PTS=" << presentation_timestamp.InSecondsF()
(...skipping 20 matching lines...)
           << "Parsed " << frame->GetTypeName() << " frame has DTS "
           << decode_timestamp.InMicroseconds()
           << "us, which is after the frame's PTS "
           << presentation_timestamp.InMicroseconds() << "us";
       DVLOG(2) << __FUNCTION__ << ": WARNING: Frame DTS("
                << decode_timestamp.InSecondsF() << ") > PTS("
                << presentation_timestamp.InSecondsF()
                << "), frame type=" << frame->GetTypeName();
     }

-    // TODO(acolwell/wolenetz): All stream parsers must emit valid (positive)
-    // frame durations. For now, we allow non-negative frame duration.
-    // See http://crbug.com/351166.
+    // All stream parsers must emit valid (non-negative) frame durations.
+    // Note that duration of 0 can occur for at least WebM alt-ref frames.
     if (frame_duration == kNoTimestamp()) {
       MEDIA_LOG(ERROR, media_log_)
           << "Unknown duration for " << frame->GetTypeName() << " frame at PTS "
           << presentation_timestamp.InMicroseconds() << "us";
       return false;
     }
     if (frame_duration < base::TimeDelta()) {
       MEDIA_LOG(ERROR, media_log_)
           << "Negative duration " << frame_duration.InMicroseconds()
           << "us for " << frame->GetTypeName() << " frame at PTS "
           << presentation_timestamp.InMicroseconds() << "us";
       return false;
     }

-    // 4. If mode equals "sequence" and group start timestamp is set, then run
+    // 3. If mode equals "sequence" and group start timestamp is set, then run
     //    the following steps:
     if (sequence_mode_ && group_start_timestamp_ != kNoTimestamp()) {
-      // 4.1. Set timestampOffset equal to group start timestamp -
+      // 3.1. Set timestampOffset equal to group start timestamp -
       //      presentation timestamp.
       *timestamp_offset = group_start_timestamp_ - presentation_timestamp;

       DVLOG(3) << __FUNCTION__ << ": updated timestampOffset is now "
                << timestamp_offset->InSecondsF();

-      // 4.2. Set group end timestamp equal to group start timestamp.
+      // 3.2. Set group end timestamp equal to group start timestamp.
       group_end_timestamp_ = group_start_timestamp_;

-      // 4.3. Set the need random access point flag on all track buffers to
+      // 3.3. Set the need random access point flag on all track buffers to
       //      true.
       SetAllTrackBuffersNeedRandomAccessPoint();

-      // 4.4. Unset group start timestamp.
+      // 3.4. Unset group start timestamp.
       group_start_timestamp_ = kNoTimestamp();
     }

-    // 5. If timestampOffset is not 0, then run the following steps:
+    // 4. If timestampOffset is not 0, then run the following steps:
     if (!timestamp_offset->is_zero()) {
-      // 5.1. Add timestampOffset to the presentation timestamp.
+      // 4.1. Add timestampOffset to the presentation timestamp.
       // Note: |frame| PTS is only updated if it survives discontinuity
       //       processing.
       presentation_timestamp += *timestamp_offset;

-      // 5.2. Add timestampOffset to the decode timestamp.
+      // 4.2. Add timestampOffset to the decode timestamp.
       // Frame DTS is only updated if it survives discontinuity processing.
       decode_timestamp += *timestamp_offset;
     }

-    // 6. Let track buffer equal the track buffer that the coded frame will be
+    // 5. Let track buffer equal the track buffer that the coded frame will be
     //    added to.

     // Remap audio and video track types to their special singleton identifiers.
     StreamParser::TrackId track_id = kAudioTrackId;
     switch (frame->type()) {
       case DemuxerStream::AUDIO:
         break;
       case DemuxerStream::VIDEO:
         track_id = kVideoTrackId;
         break;
       case DemuxerStream::TEXT:
         track_id = frame->track_id();
         break;
       case DemuxerStream::UNKNOWN:
       case DemuxerStream::NUM_TYPES:
         DCHECK(false) << ": Invalid frame type " << frame->type();
         return false;
     }

     MseTrackBuffer* track_buffer = FindTrack(track_id);
     if (!track_buffer) {
       MEDIA_LOG(ERROR, media_log_)
           << "Unknown track with type " << frame->GetTypeName()
           << ", frame processor track id " << track_id
           << ", and parser track id " << frame->track_id();
       return false;
     }

-    // 7. If last decode timestamp for track buffer is set and decode timestamp
+    // 6. If last decode timestamp for track buffer is set and decode timestamp
     //    is less than last decode timestamp
     //    OR
     //    If last decode timestamp for track buffer is set and the difference
     //    between decode timestamp and last decode timestamp is greater than 2
     //    times last frame duration:
     DecodeTimestamp track_last_decode_timestamp =
         track_buffer->last_decode_timestamp();
     if (track_last_decode_timestamp != kNoDecodeTimestamp()) {
       base::TimeDelta track_dts_delta =
           decode_timestamp - track_last_decode_timestamp;
       if (track_dts_delta < base::TimeDelta() ||
           track_dts_delta > 2 * track_buffer->last_frame_duration()) {
         DCHECK(in_coded_frame_group_);
-        // 7.1. If mode equals "segments": Set group end timestamp to
+        // 6.1. If mode equals "segments": Set group end timestamp to
         //      presentation timestamp.
         //      If mode equals "sequence": Set group start timestamp equal to
         //      the group end timestamp.
         if (!sequence_mode_) {
           group_end_timestamp_ = presentation_timestamp;
           // This triggers a discontinuity so we need to treat the next frames
           // appended within the append window as if they were the beginning of
           // a new coded frame group.
           in_coded_frame_group_ = false;
         } else {
           DVLOG(3) << __FUNCTION__ << " : Sequence mode discontinuity, GETS: "
                    << group_end_timestamp_.InSecondsF();
           DCHECK(kNoTimestamp() != group_end_timestamp_);
           group_start_timestamp_ = group_end_timestamp_;
         }

-        // 7.2. - 7.5.:
+        // 6.2. - 6.5.:
         Reset();

-        // 7.6. Jump to the Loop Top step above to restart processing of the
+        // 6.6. Jump to the Loop Top step above to restart processing of the
         //      current coded frame.
         DVLOG(3) << __FUNCTION__ << ": Discontinuity: reprocessing frame";
         continue;
       }
     }

-    // 9. Let frame end timestamp equal the sum of presentation timestamp and
+    // 7. Let frame end timestamp equal the sum of presentation timestamp and
     //    frame duration.
     base::TimeDelta frame_end_timestamp =
         presentation_timestamp + frame_duration;

-    // 10. If presentation timestamp is less than appendWindowStart, then set
-    //     the need random access point flag to true, drop the coded frame, and
-    //     jump to the top of the loop to start processing the next coded
-    //     frame.
-    // Note: We keep the result of partial discard of a buffer that overlaps
-    //       |append_window_start| and does not end after |append_window_end|.
-    // 11. If frame end timestamp is greater than appendWindowEnd, then set the
-    //     need random access point flag to true, drop the coded frame, and jump
-    //     to the top of the loop to start processing the next coded frame.
+    // 8. If presentation timestamp is less than appendWindowStart, then set
+    //    the need random access point flag to true, drop the coded frame, and
+    //    jump to the top of the loop to start processing the next coded
+    //    frame.
+    // Note: We keep the result of partial discard of a buffer that overlaps
+    //       |append_window_start| and does not end after |append_window_end|,
+    //       for streams which support partial trimming.
+    // 9. If frame end timestamp is greater than appendWindowEnd, then set the
+    //    need random access point flag to true, drop the coded frame, and jump
+    //    to the top of the loop to start processing the next coded frame.
+    // Note: We keep the result of partial discard of a buffer that overlaps
+    //       |append_window_end|, for streams which support partial trimming.
     frame->set_timestamp(presentation_timestamp);
     frame->SetDecodeTimestamp(decode_timestamp);
     if (track_buffer->stream()->supports_partial_append_window_trimming() &&
         HandlePartialAppendWindowTrimming(append_window_start,
                                           append_window_end,
                                           frame)) {
       // |frame| has been partially trimmed or had preroll added. Though
       // |frame|'s duration may have changed, do not update |frame_duration|
       // here, so |track_buffer|'s last frame duration update uses original
       // frame duration and reduces spurious discontinuity detection.
       decode_timestamp = frame->GetDecodeTimestamp();
       presentation_timestamp = frame->timestamp();
       frame_end_timestamp = frame->timestamp() + frame->duration();
     }

     if (presentation_timestamp < append_window_start ||
         frame_end_timestamp > append_window_end) {
       track_buffer->set_needs_random_access_point(true);
       DVLOG(3) << "Dropping frame that is outside append window.";
       return true;
     }

-    // Note: This step is relocated, versus April 1 spec, to allow append window
-    // processing to first filter coded frames shifted by |timestamp_offset_| in
-    // such a way that their PTS is negative.
-    // 8. If the presentation timestamp or decode timestamp is less than the
-    //    presentation start time, then run the end of stream algorithm with the
-    //    error parameter set to "decode", and abort these steps.
     DCHECK(presentation_timestamp >= base::TimeDelta());
     if (decode_timestamp < DecodeTimestamp()) {
       // B-frames may still result in negative DTS here after being shifted by
       // |timestamp_offset_|.
+      // TODO(wolenetz): This is no longer a step in the CFP, since negative DTS
+      // are allowed. Remove this parse failure and error log as part of fixing
+      // PTS/DTS conflation in SourceBufferStream. See https://crbug.com/398141
       MEDIA_LOG(ERROR, media_log_)
           << frame->GetTypeName() << " frame with PTS "
           << presentation_timestamp.InMicroseconds() << "us has negative DTS "
           << decode_timestamp.InMicroseconds()
           << "us after applying timestampOffset, handling any discontinuity, "
              "and filtering against append window";
       return false;
     }

-    // 12. If the need random access point flag on track buffer equals true,
+    // 10. If the need random access point flag on track buffer equals true,
     //     then run the following steps:
     if (track_buffer->needs_random_access_point()) {
-      // 12.1. If the coded frame is not a random access point, then drop the
+      // 10.1. If the coded frame is not a random access point, then drop the
       //       coded frame and jump to the top of the loop to start processing
       //       the next coded frame.
       if (!frame->is_key_frame()) {
         DVLOG(3) << __FUNCTION__
                  << ": Dropping frame that is not a random access point";
         return true;
       }

-      // 12.2. Set the need random access point flag on track buffer to false.
+      // 10.2. Set the need random access point flag on track buffer to false.
       track_buffer->set_needs_random_access_point(false);
     }

     // We now have a processed buffer to append to the track buffer's stream.
     // If it is the first in a new coded frame group (such as following a
     // discontinuity), notify all the track buffers' streams that a coded frame
     // group is starting.
     if (!in_coded_frame_group_) {
       // First, complete the append to track buffer streams of the previous
       // coded frame group's frames, if any.
       if (!FlushProcessedFrames())
         return false;

       // TODO(acolwell/wolenetz): This should be changed to a presentation
       // timestamp. See http://crbug.com/402502
       NotifyStartOfCodedFrameGroup(decode_timestamp);
       in_coded_frame_group_ = true;
     }

     DVLOG(3) << __FUNCTION__ << ": Sending processed frame to stream, "
              << "PTS=" << presentation_timestamp.InSecondsF()
              << ", DTS=" << decode_timestamp.InSecondsF();

-    // Steps 13-18: Note, we optimize by appending groups of contiguous
+    // Steps 11-16: Note, we optimize by appending groups of contiguous
     // processed frames for each track buffer at end of ProcessFrames() or prior
     // to NotifyStartOfCodedFrameGroup().
     track_buffer->EnqueueProcessedFrame(frame);

-    // 19. Set last decode timestamp for track buffer to decode timestamp.
+    // 17. Set last decode timestamp for track buffer to decode timestamp.
     track_buffer->set_last_decode_timestamp(decode_timestamp);

-    // 20. Set last frame duration for track buffer to frame duration.
+    // 18. Set last frame duration for track buffer to frame duration.
     track_buffer->set_last_frame_duration(frame_duration);

-    // 21. If highest presentation timestamp for track buffer is unset or frame
+    // 19. If highest presentation timestamp for track buffer is unset or frame
     //     end timestamp is greater than highest presentation timestamp, then
     //     set highest presentation timestamp for track buffer to frame end
     //     timestamp.
     track_buffer->SetHighestPresentationTimestampIfIncreased(
         frame_end_timestamp);

-    // 22. If frame end timestamp is greater than group end timestamp, then set
+    // 20. If frame end timestamp is greater than group end timestamp, then set
     //     group end timestamp equal to frame end timestamp.
     if (frame_end_timestamp > group_end_timestamp_)
       group_end_timestamp_ = frame_end_timestamp;
     DCHECK(group_end_timestamp_ >= base::TimeDelta());

+    // Step 21 is currently handled differently. See MediaSourceState's
+    // |auto_update_timestamp_offset_|.
     return true;
   }

   NOTREACHED();
   return false;
 }

 }  // namespace media
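For readers following the renumbered steps, the two frame-dropping decisions in ProcessFrame() above, the step 6 discontinuity heuristic and the step 8/9 append window check, reduce to the comparisons sketched below. Plain microsecond integers stand in for base::TimeDelta and DecodeTimestamp, and the function names are illustrative, not Chromium APIs.

#include <cstdint>

// Step 6: a frame signals a discontinuity (new coded frame group) if its DTS
// moves backwards, or jumps forward by more than twice the last frame's
// duration, relative to the track buffer's last decode timestamp.
bool IsDiscontinuity(int64_t dts_us,
                     int64_t last_dts_us,
                     int64_t last_frame_duration_us) {
  const int64_t dts_delta_us = dts_us - last_dts_us;
  return dts_delta_us < 0 || dts_delta_us > 2 * last_frame_duration_us;
}

// Steps 8 and 9: after any partial trimming, a frame is dropped (and the track
// buffer is flagged as needing a random access point) if it starts before
// appendWindowStart or ends after appendWindowEnd.
bool IsOutsideAppendWindow(int64_t pts_us,
                           int64_t frame_end_us,
                           int64_t append_window_start_us,
                           int64_t append_window_end_us) {
  return pts_us < append_window_start_us ||
         frame_end_us > append_window_end_us;
}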