Chromium Code Reviews

Side by Side Diff: media/filters/ffmpeg_demuxer.cc

Issue 335273002: Fix seeking when the start time is non-zero. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Fixes. Created 6 years, 6 months ago
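
For orientation before the diff: the CL records the lowest stream start time, subtracts it from every demuxed timestamp so the externally visible timeline begins at zero, and adds it back when a seek is forwarded to FFmpeg. The snippet below is only a rough sketch of that rebasing idea, not code from the CL; Microseconds, RebaseTimestamp, and ToContainerSeekTime are made-up names for illustration.

// Rough sketch (not CL code): rebase packet timestamps so playback starts at
// zero, and undo the shift when translating a seek back into the container's
// native timeline.
#include <cassert>
#include <cstdint>

// Hypothetical stand-in for base::TimeDelta, expressed in microseconds.
using Microseconds = int64_t;

// Packets leave the container with timestamps relative to the stream's own
// start time, which may be non-zero (or negative, e.g. Ogg preroll).
Microseconds RebaseTimestamp(Microseconds packet_pts, Microseconds start_time) {
  return packet_pts - start_time;  // externally visible timeline begins at 0
}

// Seeks arrive on the zero-based timeline and must be shifted back before
// being handed to the container-level seek.
Microseconds ToContainerSeekTime(Microseconds seek_time,
                                 Microseconds start_time) {
  return seek_time + start_time;
}

int main() {
  const Microseconds start_time = 2000000;  // lowest stream start time: 2s
  assert(RebaseTimestamp(2033000, start_time) == 33000);
  // A seek to 1s of media time maps to 3s on the container's timeline.
  assert(ToContainerSeekTime(1000000, start_time) == 3000000);
  return 0;
}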
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "media/filters/ffmpeg_demuxer.h" 5 #include "media/filters/ffmpeg_demuxer.h"
6 6
7 #include <algorithm> 7 #include <algorithm>
8 #include <string> 8 #include <string>
9 9
10 #include "base/base64.h" 10 #include "base/base64.h"
(...skipping 37 matching lines...)
48 } 48 }
49 49
50 return base::Time(); 50 return base::Time();
51 } 51 }
52 52
53 static base::TimeDelta FramesToTimeDelta(int frames, double sample_rate) { 53 static base::TimeDelta FramesToTimeDelta(int frames, double sample_rate) {
54 return base::TimeDelta::FromMicroseconds( 54 return base::TimeDelta::FromMicroseconds(
55 frames * base::Time::kMicrosecondsPerSecond / sample_rate); 55 frames * base::Time::kMicrosecondsPerSecond / sample_rate);
56 } 56 }
57 57
58 static base::TimeDelta ExtractStartTime(AVFormatContext* format_context,
59 AVStream* stream) {
60 if (stream->start_time == static_cast<int64_t>(AV_NOPTS_VALUE))
61 return kNoTimestamp();
62
63 // First try to use the |start_time| value directly.
64 base::TimeDelta start_time =
65 ConvertFromTimeBase(stream->time_base, stream->start_time);
66
67 // Next see if the first buffered pts value is usable.
68 if (stream->pts_buffer[0] != static_cast<int64_t>(AV_NOPTS_VALUE)) {
69 const base::TimeDelta buffered_pts =
70 ConvertFromTimeBase(stream->time_base, stream->pts_buffer[0]);
71 if (buffered_pts < start_time)
72 start_time = buffered_pts;
73 }
74
75 if (!format_context->packet_buffer)
76 return start_time;
77
78 // Compare against the packets buffered during avformat_find_stream_info().
79 struct AVPacketList* packet_buffer = format_context->packet_buffer;
80 while (packet_buffer != format_context->packet_buffer_end) {
acolwell GONE FROM CHROMIUM 2014/06/17 17:41:22 How deep could this buffer be? Should we be concer
DaleCurtis 2014/06/17 20:52:30 2500 packets across all streams: https://code.goog
DaleCurtis 2014/06/17 21:12:51 Note: With the vector approach, it'd be easy to ru
acolwell GONE FROM CHROMIUM 2014/06/17 21:39:43 You could still do that with a local vector instea
DaleCurtis 2014/06/17 22:30:41 Done.
81 if (packet_buffer->pkt.stream_index == stream->index) {
82 const base::TimeDelta packet_pts =
83 ConvertFromTimeBase(stream->time_base, packet_buffer->pkt.pts);
84 if (packet_pts < start_time)
85 start_time = packet_pts;
86 }
87 packet_buffer = packet_buffer->next;
88 }
89
90 // NOTE: Do not use AVStream->first_dts since |start_time| should be a
91 // presentation timestamp.
92 return start_time;
93 }
94
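
The review thread above asks how deep format_context->packet_buffer can get (roughly 2500 packets across all streams) and suggests doing the scan with a local vector. Purely as an illustration of that single-pass idea, and not the code that landed, the sketch below records the lowest buffered pts per stream index in one walk of the list; it assumes the same internal FFmpeg fields the CL already reads (packet_buffer, packet_buffer_end, AVPacketList, pkt.pts, pkt.stream_index) and the FFmpeg headers of that era.

// Illustrative only: one pass over the packets buffered during
// avformat_find_stream_info(), keeping the lowest pts seen per stream index.
// Uses the same list traversal as ExtractStartTime() above, but fills a local
// vector so each stream does not need its own walk of the list.
extern "C" {
#include <libavformat/avformat.h>
}
#include <cstdint>
#include <vector>

static std::vector<int64_t> CollectMinBufferedPts(
    AVFormatContext* format_context) {
  std::vector<int64_t> min_pts(format_context->nb_streams,
                               static_cast<int64_t>(AV_NOPTS_VALUE));
  for (const AVPacketList* it = format_context->packet_buffer;
       it != format_context->packet_buffer_end; it = it->next) {
    const int index = it->pkt.stream_index;
    if (it->pkt.pts == static_cast<int64_t>(AV_NOPTS_VALUE))
      continue;
    if (min_pts[index] == static_cast<int64_t>(AV_NOPTS_VALUE) ||
        it->pkt.pts < min_pts[index]) {
      min_pts[index] = it->pkt.pts;
    }
  }
  return min_pts;
}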
58 // 95 //
59 // FFmpegDemuxerStream 96 // FFmpegDemuxerStream
60 // 97 //
61 FFmpegDemuxerStream::FFmpegDemuxerStream( 98 FFmpegDemuxerStream::FFmpegDemuxerStream(
62 FFmpegDemuxer* demuxer, 99 FFmpegDemuxer* demuxer,
63 AVStream* stream) 100 AVStream* stream,
101 bool discard_negative_timestamps)
64 : demuxer_(demuxer), 102 : demuxer_(demuxer),
65 task_runner_(base::MessageLoopProxy::current()), 103 task_runner_(base::MessageLoopProxy::current()),
66 stream_(stream), 104 stream_(stream),
67 type_(UNKNOWN), 105 type_(UNKNOWN),
68 end_of_stream_(false), 106 end_of_stream_(false),
69 last_packet_timestamp_(kNoTimestamp()), 107 last_packet_timestamp_(kNoTimestamp()),
70 bitstream_converter_enabled_(false) { 108 bitstream_converter_enabled_(false),
109 discard_negative_timestamps_(discard_negative_timestamps) {
71 DCHECK(demuxer_); 110 DCHECK(demuxer_);
72 111
73 bool is_encrypted = false; 112 bool is_encrypted = false;
74 113
75 // Determine our media format. 114 // Determine our media format.
76 switch (stream->codec->codec_type) { 115 switch (stream->codec->codec_type) {
77 case AVMEDIA_TYPE_AUDIO: 116 case AVMEDIA_TYPE_AUDIO:
78 type_ = AUDIO; 117 type_ = AUDIO;
79 AVStreamToAudioDecoderConfig(stream, &audio_config_, true); 118 AVStreamToAudioDecoderConfig(stream, &audio_config_, true);
80 is_encrypted = audio_config_.is_encrypted(); 119 is_encrypted = audio_config_.is_encrypted();
(...skipping 138 matching lines...)
219 audio_decoder_config().samples_per_second(); 258 audio_decoder_config().samples_per_second();
220 buffer->set_discard_padding(std::make_pair( 259 buffer->set_discard_padding(std::make_pair(
221 FramesToTimeDelta(discard_front_samples, samples_per_second), 260 FramesToTimeDelta(discard_front_samples, samples_per_second),
222 FramesToTimeDelta(discard_end_samples, samples_per_second))); 261 FramesToTimeDelta(discard_end_samples, samples_per_second)));
223 } 262 }
224 263
225 if (decrypt_config) 264 if (decrypt_config)
226 buffer->set_decrypt_config(decrypt_config.Pass()); 265 buffer->set_decrypt_config(decrypt_config.Pass());
227 } 266 }
228 267
229 buffer->set_timestamp(ConvertStreamTimestamp( 268 buffer->set_duration(
230 stream_->time_base, packet->pts)); 269 ConvertStreamTimestamp(stream_->time_base, packet->duration));
231 buffer->set_duration(ConvertStreamTimestamp( 270
232 stream_->time_base, packet->duration)); 271 // Note: If pts is AV_NOPTS_VALUE, stream_timestamp will be kNoTimestamp().
233 if (buffer->timestamp() != kNoTimestamp() && 272 const base::TimeDelta stream_timestamp =
234 last_packet_timestamp_ != kNoTimestamp() && 273 ConvertStreamTimestamp(stream_->time_base, packet->pts);
235 last_packet_timestamp_ < buffer->timestamp()) { 274
236 buffered_ranges_.Add(last_packet_timestamp_, buffer->timestamp()); 275 if (stream_timestamp != kNoTimestamp()) {
237 demuxer_->NotifyBufferingChanged(); 276 buffer->set_timestamp(stream_timestamp - demuxer_->start_time());
277
278 // If enabled, mark packets with negative timestamps for post-decode
279 // discard.
280 if (discard_negative_timestamps_ && stream_timestamp < base::TimeDelta()) {
281 if (stream_timestamp + buffer->duration() < base::TimeDelta()) {
282 // Discard the entire packet if it's entirely before zero.
283 buffer->set_discard_padding(
284 std::make_pair(kInfiniteDuration(), base::TimeDelta()));
285 } else {
286 // Only discard part of the frame if it overlaps zero.
287 buffer->set_discard_padding(
288 std::make_pair(-stream_timestamp, base::TimeDelta()));
289 }
290 }
291
292 if (last_packet_timestamp_ != kNoTimestamp() &&
293 last_packet_timestamp_ < buffer->timestamp()) {
294 buffered_ranges_.Add(last_packet_timestamp_, buffer->timestamp());
295 demuxer_->NotifyBufferingChanged();
296 }
297
298 // The demuxer should always output positive timestamps.
299 DCHECK(buffer->timestamp() >= base::TimeDelta());
300 } else {
301 buffer->set_timestamp(kNoTimestamp());
238 } 302 }
303
304 // TODO(dalecurtis): This allows transitions from <valid ts> -> <no timestamp>
305 // which shouldn't be allowed. See http://crbug.com/384532
239 last_packet_timestamp_ = buffer->timestamp(); 306 last_packet_timestamp_ = buffer->timestamp();
240 307
241 buffer_queue_.Push(buffer); 308 buffer_queue_.Push(buffer);
242 SatisfyPendingRead(); 309 SatisfyPendingRead();
243 } 310 }
244 311
245 void FFmpegDemuxerStream::SetEndOfStream() { 312 void FFmpegDemuxerStream::SetEndOfStream() {
246 DCHECK(task_runner_->BelongsToCurrentThread()); 313 DCHECK(task_runner_->BelongsToCurrentThread());
247 end_of_stream_ = true; 314 end_of_stream_ = true;
248 SatisfyPendingRead(); 315 SatisfyPendingRead();
(...skipping 12 matching lines...)
261 buffer_queue_.Clear(); 328 buffer_queue_.Clear();
262 if (!read_cb_.is_null()) { 329 if (!read_cb_.is_null()) {
263 base::ResetAndReturn(&read_cb_).Run( 330 base::ResetAndReturn(&read_cb_).Run(
264 DemuxerStream::kOk, DecoderBuffer::CreateEOSBuffer()); 331 DemuxerStream::kOk, DecoderBuffer::CreateEOSBuffer());
265 } 332 }
266 demuxer_ = NULL; 333 demuxer_ = NULL;
267 stream_ = NULL; 334 stream_ = NULL;
268 end_of_stream_ = true; 335 end_of_stream_ = true;
269 } 336 }
270 337
271 base::TimeDelta FFmpegDemuxerStream::duration() {
272 return duration_;
273 }
274
275 DemuxerStream::Type FFmpegDemuxerStream::type() { 338 DemuxerStream::Type FFmpegDemuxerStream::type() {
276 DCHECK(task_runner_->BelongsToCurrentThread()); 339 DCHECK(task_runner_->BelongsToCurrentThread());
277 return type_; 340 return type_;
278 } 341 }
279 342
280 void FFmpegDemuxerStream::Read(const ReadCB& read_cb) { 343 void FFmpegDemuxerStream::Read(const ReadCB& read_cb) {
281 DCHECK(task_runner_->BelongsToCurrentThread()); 344 DCHECK(task_runner_->BelongsToCurrentThread());
282 CHECK(read_cb_.is_null()) << "Overlapping reads are not supported"; 345 CHECK(read_cb_.is_null()) << "Overlapping reads are not supported";
283 read_cb_ = BindToCurrentLoop(read_cb); 346 read_cb_ = BindToCurrentLoop(read_cb);
284 347
(...skipping 153 matching lines...)
438 } 501 }
439 502
440 void FFmpegDemuxer::Seek(base::TimeDelta time, const PipelineStatusCB& cb) { 503 void FFmpegDemuxer::Seek(base::TimeDelta time, const PipelineStatusCB& cb) {
441 DCHECK(task_runner_->BelongsToCurrentThread()); 504 DCHECK(task_runner_->BelongsToCurrentThread());
442 CHECK(!pending_seek_); 505 CHECK(!pending_seek_);
443 506
444 // TODO(scherkus): Inspect |pending_read_| and cancel IO via |blocking_url_|, 507 // TODO(scherkus): Inspect |pending_read_| and cancel IO via |blocking_url_|,
445 // otherwise we can end up waiting for a pre-seek read to complete even though 508 // otherwise we can end up waiting for a pre-seek read to complete even though
446 // we know we're going to drop it on the floor. 509 // we know we're going to drop it on the floor.
447 510
448 // Always seek to a timestamp less than or equal to the desired timestamp. 511 // FFmpeg requires seeks to be adjusted according to the lowest starting time.
449 int flags = AVSEEK_FLAG_BACKWARD; 512 const base::TimeDelta seek_time = time + start_time_;
450 513
451 // Passing -1 as our stream index lets FFmpeg pick a default stream. FFmpeg 514 // Find the first video stream which contains |seek_time| or if none exists,
452 // will attempt to use the lowest-index video stream, if present, followed by 515 // the stream with the lowest starting timestamp that contains |seek_time|.
453 // the lowest-index audio stream. 516 AVStream* seeking_stream = NULL;
517 for (size_t i = 0; i < stream_start_times_.size(); ++i) {
518 if (stream_start_times_[i] == kNoTimestamp())
519 continue;
520
521 if (seek_time < stream_start_times_[i])
522 continue;
523
524 // If we find a video stream in range, always use it for seeking. Otherwise
525 // we'll get corruption during playback if we land off of a keyframe.
526 if (streams_[i]->type() == DemuxerStream::VIDEO) {
527 seeking_stream = glue_->format_context()->streams[i];
528 break;
529 }
530
531 // Otherwise use the stream with the lowest starting timestamp.
532 if (!seeking_stream ||
533 stream_start_times_[i] < stream_start_times_[seeking_stream->index]) {
534 seeking_stream = glue_->format_context()->streams[i];
535 }
536 }
537
454 pending_seek_ = true; 538 pending_seek_ = true;
455 base::PostTaskAndReplyWithResult( 539 base::PostTaskAndReplyWithResult(
456 blocking_thread_.message_loop_proxy().get(), 540 blocking_thread_.message_loop_proxy().get(),
457 FROM_HERE, 541 FROM_HERE,
458 base::Bind(&av_seek_frame, 542 base::Bind(&av_seek_frame,
459 glue_->format_context(), 543 glue_->format_context(),
460 -1, 544 seeking_stream->index,
461 time.InMicroseconds(), 545 ConvertToTimeBase(seeking_stream->time_base, seek_time),
462 flags), 546 // Always seek to a timestamp <= to the desired timestamp.
547 AVSEEK_FLAG_BACKWARD),
463 base::Bind( 548 base::Bind(
464 &FFmpegDemuxer::OnSeekFrameDone, weak_factory_.GetWeakPtr(), cb)); 549 &FFmpegDemuxer::OnSeekFrameDone, weak_factory_.GetWeakPtr(), cb));
465 } 550 }
466 551
467 void FFmpegDemuxer::Initialize(DemuxerHost* host, 552 void FFmpegDemuxer::Initialize(DemuxerHost* host,
468 const PipelineStatusCB& status_cb, 553 const PipelineStatusCB& status_cb,
469 bool enable_text_tracks) { 554 bool enable_text_tracks) {
470 DCHECK(task_runner_->BelongsToCurrentThread()); 555 DCHECK(task_runner_->BelongsToCurrentThread());
471 host_ = host; 556 host_ = host;
472 text_enabled_ = enable_text_tracks; 557 text_enabled_ = enable_text_tracks;
(...skipping 28 matching lines...)
501 DemuxerStream::Type type) const { 586 DemuxerStream::Type type) const {
502 StreamVector::const_iterator iter; 587 StreamVector::const_iterator iter;
503 for (iter = streams_.begin(); iter != streams_.end(); ++iter) { 588 for (iter = streams_.begin(); iter != streams_.end(); ++iter) {
504 if (*iter && (*iter)->type() == type) { 589 if (*iter && (*iter)->type() == type) {
505 return *iter; 590 return *iter;
506 } 591 }
507 } 592 }
508 return NULL; 593 return NULL;
509 } 594 }
510 595
511 base::TimeDelta FFmpegDemuxer::GetStartTime() const {
512 DCHECK(task_runner_->BelongsToCurrentThread());
513 return start_time_;
514 }
515
516 base::Time FFmpegDemuxer::GetTimelineOffset() const { 596 base::Time FFmpegDemuxer::GetTimelineOffset() const {
517 return timeline_offset_; 597 return timeline_offset_;
518 } 598 }
519 599
520 Demuxer::Liveness FFmpegDemuxer::GetLiveness() const { 600 Demuxer::Liveness FFmpegDemuxer::GetLiveness() const {
521 DCHECK(task_runner_->BelongsToCurrentThread()); 601 DCHECK(task_runner_->BelongsToCurrentThread());
522 return liveness_; 602 return liveness_;
523 } 603 }
524 604
525 void FFmpegDemuxer::AddTextStreams() { 605 void FFmpegDemuxer::AddTextStreams() {
(...skipping 88 matching lines...)
614 status_cb.Run(DEMUXER_ERROR_COULD_NOT_PARSE); 694 status_cb.Run(DEMUXER_ERROR_COULD_NOT_PARSE);
615 return; 695 return;
616 } 696 }
617 697
618 // Create demuxer stream entries for each possible AVStream. Each stream 698 // Create demuxer stream entries for each possible AVStream. Each stream
619 // is examined to determine if it is supported or not (is the codec enabled 699 // is examined to determine if it is supported or not (is the codec enabled
620 // for it in this release?). Unsupported streams are skipped, allowing for 700 // for it in this release?). Unsupported streams are skipped, allowing for
621 // partial playback. At least one audio or video stream must be playable. 701 // partial playback. At least one audio or video stream must be playable.
622 AVFormatContext* format_context = glue_->format_context(); 702 AVFormatContext* format_context = glue_->format_context();
623 streams_.resize(format_context->nb_streams); 703 streams_.resize(format_context->nb_streams);
704 stream_start_times_.resize(format_context->nb_streams);
624 705
625 AVStream* audio_stream = NULL; 706 AVStream* audio_stream = NULL;
626 AudioDecoderConfig audio_config; 707 AudioDecoderConfig audio_config;
627 708
628 AVStream* video_stream = NULL; 709 AVStream* video_stream = NULL;
629 VideoDecoderConfig video_config; 710 VideoDecoderConfig video_config;
630 711
712 // If available, |start_time_| will be set to the lowest stream start time.
713 start_time_ = kInfiniteDuration();
714
631 base::TimeDelta max_duration; 715 base::TimeDelta max_duration;
632 for (size_t i = 0; i < format_context->nb_streams; ++i) { 716 for (size_t i = 0; i < format_context->nb_streams; ++i) {
633 AVStream* stream = format_context->streams[i]; 717 AVStream* stream = format_context->streams[i];
634 AVCodecContext* codec_context = stream->codec; 718 const AVCodecContext* codec_context = stream->codec;
635 AVMediaType codec_type = codec_context->codec_type; 719 const AVMediaType codec_type = codec_context->codec_type;
720 bool discard_negative_timestamps = false;
721 stream_start_times_[i] = kNoTimestamp();
636 722
637 if (codec_type == AVMEDIA_TYPE_AUDIO) { 723 if (codec_type == AVMEDIA_TYPE_AUDIO) {
638 if (audio_stream) 724 if (audio_stream)
639 continue; 725 continue;
640 726
641 // Log the codec detected, whether it is supported or not. 727 // Log the codec detected, whether it is supported or not.
642 UMA_HISTOGRAM_SPARSE_SLOWLY("Media.DetectedAudioCodec", 728 UMA_HISTOGRAM_SPARSE_SLOWLY("Media.DetectedAudioCodec",
643 codec_context->codec_id); 729 codec_context->codec_id);
644 // Ensure the codec is supported. IsValidConfig() also checks that the 730 // Ensure the codec is supported. IsValidConfig() also checks that the
645 // channel layout and sample format are valid. 731 // channel layout and sample format are valid.
646 AVStreamToAudioDecoderConfig(stream, &audio_config, false); 732 AVStreamToAudioDecoderConfig(stream, &audio_config, false);
647 if (!audio_config.IsValidConfig()) 733 if (!audio_config.IsValidConfig())
648 continue; 734 continue;
649 audio_stream = stream; 735 audio_stream = stream;
736
737 // Enable post-decode frame dropping for packets with negative timestamps
738 // as outlined in section A.2 in the Ogg Vorbis spec:
739 // http://xiph.org/vorbis/doc/Vorbis_I_spec.html
740 discard_negative_timestamps =
741 audio_config.codec() == kCodecVorbis &&
742 strcmp(glue_->format_context()->iformat->name, "ogg") == 0;
650 } else if (codec_type == AVMEDIA_TYPE_VIDEO) { 743 } else if (codec_type == AVMEDIA_TYPE_VIDEO) {
651 if (video_stream) 744 if (video_stream)
652 continue; 745 continue;
653 746
654 // Log the codec detected, whether it is supported or not. 747 // Log the codec detected, whether it is supported or not.
655 UMA_HISTOGRAM_SPARSE_SLOWLY("Media.DetectedVideoCodec", 748 UMA_HISTOGRAM_SPARSE_SLOWLY("Media.DetectedVideoCodec",
656 codec_context->codec_id); 749 codec_context->codec_id);
657 // Ensure the codec is supported. IsValidConfig() also checks that the 750 // Ensure the codec is supported. IsValidConfig() also checks that the
658 // frame size and visible size are valid. 751 // frame size and visible size are valid.
659 AVStreamToVideoDecoderConfig(stream, &video_config, false); 752 AVStreamToVideoDecoderConfig(stream, &video_config, false);
660 753
661 if (!video_config.IsValidConfig()) 754 if (!video_config.IsValidConfig())
662 continue; 755 continue;
663 video_stream = stream; 756 video_stream = stream;
664 } else if (codec_type == AVMEDIA_TYPE_SUBTITLE) { 757 } else if (codec_type == AVMEDIA_TYPE_SUBTITLE) {
665 if (codec_context->codec_id != AV_CODEC_ID_WEBVTT || !text_enabled_) { 758 if (codec_context->codec_id != AV_CODEC_ID_WEBVTT || !text_enabled_) {
666 continue; 759 continue;
667 } 760 }
668 } else { 761 } else {
669 continue; 762 continue;
670 } 763 }
671 764
672 streams_[i] = new FFmpegDemuxerStream(this, stream); 765 streams_[i] =
766 new FFmpegDemuxerStream(this, stream, discard_negative_timestamps);
673 max_duration = std::max(max_duration, streams_[i]->duration()); 767 max_duration = std::max(max_duration, streams_[i]->duration());
674 768
675 if (stream->first_dts != static_cast<int64_t>(AV_NOPTS_VALUE)) { 769 // Find the lowest stream start time. It will be used to rebase all other
676 const base::TimeDelta first_dts = ConvertFromTimeBase( 770 // stream timestamps to expose a zero-based timeline.
677 stream->time_base, stream->first_dts); 771 stream_start_times_[i] = ExtractStartTime(format_context, stream);
678 if (start_time_ == kNoTimestamp() || first_dts < start_time_) 772 if (stream_start_times_[i] != kNoTimestamp() &&
679 start_time_ = first_dts; 773 stream_start_times_[i] < start_time_) {
774 start_time_ = stream_start_times_[i];
680 } 775 }
681 } 776 }
682 777
683 if (!audio_stream && !video_stream) { 778 if (!audio_stream && !video_stream) {
684 status_cb.Run(DEMUXER_ERROR_NO_SUPPORTED_STREAMS); 779 status_cb.Run(DEMUXER_ERROR_NO_SUPPORTED_STREAMS);
685 return; 780 return;
686 } 781 }
687 782
688 if (text_enabled_) 783 if (text_enabled_)
689 AddTextStreams(); 784 AddTextStreams();
690 785
691 if (format_context->duration != static_cast<int64_t>(AV_NOPTS_VALUE)) { 786 if (format_context->duration != static_cast<int64_t>(AV_NOPTS_VALUE)) {
692 // If there is a duration value in the container use that to find the 787 // If there is a duration value in the container use that to find the
693 // maximum between it and the duration from A/V streams. 788 // maximum between it and the duration from A/V streams.
694 const AVRational av_time_base = {1, AV_TIME_BASE}; 789 const AVRational av_time_base = {1, AV_TIME_BASE};
695 max_duration = 790 max_duration =
696 std::max(max_duration, 791 std::max(max_duration,
697 ConvertFromTimeBase(av_time_base, format_context->duration)); 792 ConvertFromTimeBase(av_time_base, format_context->duration));
698 } else { 793 } else {
699 // The duration is unknown, in which case this is likely a live stream. 794 // The duration is unknown, in which case this is likely a live stream.
700 max_duration = kInfiniteDuration(); 795 max_duration = kInfiniteDuration();
701 } 796 }
702 797
703 // Some demuxers, like WAV, do not put timestamps on their frames. We 798 // If no start time could be determined, default to zero and prefer the video
704 // assume the the start time is 0. 799 // stream over the audio stream for seeking. E.g., The WAV demuxer does not
705 if (start_time_ == kNoTimestamp()) 800 // put timestamps on its frames.
801 if (start_time_ == kInfiniteDuration()) {
706 start_time_ = base::TimeDelta(); 802 start_time_ = base::TimeDelta();
803 for (size_t i = 0; i < format_context->nb_streams; ++i) {
804 if (streams_[i])
805 stream_start_times_[i] = start_time_;
806 }
807 }
707 808
708 // MPEG-4 B-frames cause grief for a simple container like AVI. Enable PTS 809 // MPEG-4 B-frames cause grief for a simple container like AVI. Enable PTS
709 // generation so we always get timestamps, see http://crbug.com/169570 810 // generation so we always get timestamps, see http://crbug.com/169570
710 if (strcmp(format_context->iformat->name, "avi") == 0) 811 if (strcmp(format_context->iformat->name, "avi") == 0)
711 format_context->flags |= AVFMT_FLAG_GENPTS; 812 format_context->flags |= AVFMT_FLAG_GENPTS;
712 813
713 timeline_offset_ = ExtractTimelineOffset(format_context); 814 // For testing purposes, don't overwrite the timeline offset if set already.
815 if (timeline_offset_.is_null())
816 timeline_offset_ = ExtractTimelineOffset(format_context);
817
818 // Since we're shifting the externally visible start time to zero, we need to
819 // adjust the timeline offset to compensate.
820 if (!timeline_offset_.is_null())
821 timeline_offset_ += start_time_;
714 822
715 if (max_duration == kInfiniteDuration() && !timeline_offset_.is_null()) { 823 if (max_duration == kInfiniteDuration() && !timeline_offset_.is_null()) {
716 liveness_ = LIVENESS_LIVE; 824 liveness_ = LIVENESS_LIVE;
717 } else if (max_duration != kInfiniteDuration()) { 825 } else if (max_duration != kInfiniteDuration()) {
718 liveness_ = LIVENESS_RECORDED; 826 liveness_ = LIVENESS_RECORDED;
719 } else { 827 } else {
720 liveness_ = LIVENESS_UNKNOWN; 828 liveness_ = LIVENESS_UNKNOWN;
721 } 829 }
722 830
723 // Good to go: set the duration and bitrate and notify we're done 831 // Good to go: set the duration and bitrate and notify we're done
(...skipping 52 matching lines...)
776 video_codec->time_base.num, 884 video_codec->time_base.num,
777 video_codec->time_base.den)); 885 video_codec->time_base.den));
778 media_log_->SetStringProperty( 886 media_log_->SetStringProperty(
779 "video_format", VideoFrame::FormatToString(video_config.format())); 887 "video_format", VideoFrame::FormatToString(video_config.format()));
780 media_log_->SetBooleanProperty("video_is_encrypted", 888 media_log_->SetBooleanProperty("video_is_encrypted",
781 video_config.is_encrypted()); 889 video_config.is_encrypted());
782 } else { 890 } else {
783 media_log_->SetBooleanProperty("found_video_stream", false); 891 media_log_->SetBooleanProperty("found_video_stream", false);
784 } 892 }
785 893
786
787 media_log_->SetTimeProperty("max_duration", max_duration); 894 media_log_->SetTimeProperty("max_duration", max_duration);
788 media_log_->SetTimeProperty("start_time", start_time_); 895 media_log_->SetTimeProperty("start_time", start_time_);
789 media_log_->SetIntegerProperty("bitrate", bitrate_); 896 media_log_->SetIntegerProperty("bitrate", bitrate_);
790 897
791 status_cb.Run(PIPELINE_OK); 898 status_cb.Run(PIPELINE_OK);
792 } 899 }
793 900
794 void FFmpegDemuxer::OnSeekFrameDone(const PipelineStatusCB& cb, int result) { 901 void FFmpegDemuxer::OnSeekFrameDone(const PipelineStatusCB& cb, int result) {
795 DCHECK(task_runner_->BelongsToCurrentThread()); 902 DCHECK(task_runner_->BelongsToCurrentThread());
796 CHECK(pending_seek_); 903 CHECK(pending_seek_);
(...skipping 215 matching lines...)
1012 } 1119 }
1013 for (size_t i = 0; i < buffered.size(); ++i) 1120 for (size_t i = 0; i < buffered.size(); ++i)
1014 host_->AddBufferedTimeRange(buffered.start(i), buffered.end(i)); 1121 host_->AddBufferedTimeRange(buffered.start(i), buffered.end(i));
1015 } 1122 }
1016 1123
1017 void FFmpegDemuxer::OnDataSourceError() { 1124 void FFmpegDemuxer::OnDataSourceError() {
1018 host_->OnDemuxerError(PIPELINE_ERROR_READ); 1125 host_->OnDemuxerError(PIPELINE_ERROR_READ);
1019 } 1126 }
1020 1127
1021 } // namespace media 1128 } // namespace media