Chromium Code Reviews

Side by Side Diff: media/filters/ffmpeg_demuxer.cc

Issue 335273002: Fix seeking when the start time is non-zero. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Comments. New DEPS. Created 6 years, 6 months ago
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "media/filters/ffmpeg_demuxer.h" 5 #include "media/filters/ffmpeg_demuxer.h"
6 6
7 #include <algorithm> 7 #include <algorithm>
8 #include <string> 8 #include <string>
9 9
10 #include "base/base64.h" 10 #include "base/base64.h"
(...skipping 37 matching lines...)
48 } 48 }
49 49
50 return base::Time(); 50 return base::Time();
51 } 51 }
52 52
53 static base::TimeDelta FramesToTimeDelta(int frames, double sample_rate) { 53 static base::TimeDelta FramesToTimeDelta(int frames, double sample_rate) {
54 return base::TimeDelta::FromMicroseconds( 54 return base::TimeDelta::FromMicroseconds(
55 frames * base::Time::kMicrosecondsPerSecond / sample_rate); 55 frames * base::Time::kMicrosecondsPerSecond / sample_rate);
56 } 56 }
57 57
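As a quick sanity check of the conversion above, here is a standalone sketch using plain int64 microseconds instead of base::TimeDelta (values chosen purely for illustration): 441 frames at 44100 Hz works out to exactly 10 ms.

#include <cassert>
#include <cstdint>

// Illustrative stand-in for FramesToTimeDelta(); not the Chromium types.
static int64_t FramesToMicroseconds(int frames, double sample_rate) {
  return static_cast<int64_t>(frames * 1000000.0 / sample_rate);
}

int main() {
  assert(FramesToMicroseconds(441, 44100.0) == 10000);   // 10 ms of audio.
  assert(FramesToMicroseconds(1024, 48000.0) == 21333);  // ~21.3 ms, truncated.
  return 0;
}
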
58 static base::TimeDelta ExtractStartTime(AVStream* stream,
59 base::TimeDelta start_time_estimate) {
60 DCHECK(start_time_estimate != kNoTimestamp());
61 if (stream->start_time == static_cast<int64_t>(AV_NOPTS_VALUE)) {
62 return start_time_estimate == kInfiniteDuration() ? kNoTimestamp()
63 : start_time_estimate;
64 }
65
66 // First try the lower of the estimate and the |start_time| value.
67 base::TimeDelta start_time =
68 std::min(ConvertFromTimeBase(stream->time_base, stream->start_time),
69 start_time_estimate);
70
71 // Next see if the first buffered pts value is usable.
72 if (stream->pts_buffer[0] != static_cast<int64_t>(AV_NOPTS_VALUE)) {
73 const base::TimeDelta buffered_pts =
74 ConvertFromTimeBase(stream->time_base, stream->pts_buffer[0]);
75 if (buffered_pts < start_time)
76 start_time = buffered_pts;
77 }
78
79 // NOTE: Do not use AVStream->first_dts since |start_time| should be a
80 // presentation timestamp.
81 return start_time;
82 }
83
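A minimal sketch of the selection order in ExtractStartTime() above, using plain int64 microseconds; the sentinels and parameter names here are illustrative stand-ins for kNoTimestamp()/kInfiniteDuration() and the AVStream fields, not the real media/ types.

#include <algorithm>
#include <cstdint>
#include <limits>

// Stand-ins for kNoTimestamp() and kInfiniteDuration().
const int64_t kNoTimestampUs = std::numeric_limits<int64_t>::min();
const int64_t kInfiniteUs = std::numeric_limits<int64_t>::max();

// Prefer the smallest usable presentation time, as in ExtractStartTime().
int64_t ExtractStartTimeUs(bool has_stream_start_time,
                           int64_t stream_start_time_us,
                           bool has_first_buffered_pts,
                           int64_t first_buffered_pts_us,
                           int64_t start_time_estimate_us) {
  if (!has_stream_start_time) {
    // No container value; fall back to the packet-based estimate, if any.
    return start_time_estimate_us == kInfiniteUs ? kNoTimestampUs
                                                 : start_time_estimate_us;
  }
  int64_t start_time_us =
      std::min(stream_start_time_us, start_time_estimate_us);
  if (has_first_buffered_pts)
    start_time_us = std::min(start_time_us, first_buffered_pts_us);
  return start_time_us;
}
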
58 // 84 //
59 // FFmpegDemuxerStream 85 // FFmpegDemuxerStream
60 // 86 //
61 FFmpegDemuxerStream::FFmpegDemuxerStream( 87 FFmpegDemuxerStream::FFmpegDemuxerStream(
62 FFmpegDemuxer* demuxer, 88 FFmpegDemuxer* demuxer,
63 AVStream* stream) 89 AVStream* stream,
90 bool discard_negative_timestamps)
64 : demuxer_(demuxer), 91 : demuxer_(demuxer),
65 task_runner_(base::MessageLoopProxy::current()), 92 task_runner_(base::MessageLoopProxy::current()),
66 stream_(stream), 93 stream_(stream),
67 type_(UNKNOWN), 94 type_(UNKNOWN),
68 end_of_stream_(false), 95 end_of_stream_(false),
69 last_packet_timestamp_(kNoTimestamp()), 96 last_packet_timestamp_(kNoTimestamp()),
70 bitstream_converter_enabled_(false) { 97 bitstream_converter_enabled_(false),
98 discard_negative_timestamps_(discard_negative_timestamps) {
71 DCHECK(demuxer_); 99 DCHECK(demuxer_);
72 100
73 bool is_encrypted = false; 101 bool is_encrypted = false;
74 102
75 // Determine our media format. 103 // Determine our media format.
76 switch (stream->codec->codec_type) { 104 switch (stream->codec->codec_type) {
77 case AVMEDIA_TYPE_AUDIO: 105 case AVMEDIA_TYPE_AUDIO:
78 type_ = AUDIO; 106 type_ = AUDIO;
79 AVStreamToAudioDecoderConfig(stream, &audio_config_, true); 107 AVStreamToAudioDecoderConfig(stream, &audio_config_, true);
80 is_encrypted = audio_config_.is_encrypted(); 108 is_encrypted = audio_config_.is_encrypted();
(...skipping 138 matching lines...)
219 audio_decoder_config().samples_per_second(); 247 audio_decoder_config().samples_per_second();
220 buffer->set_discard_padding(std::make_pair( 248 buffer->set_discard_padding(std::make_pair(
221 FramesToTimeDelta(discard_front_samples, samples_per_second), 249 FramesToTimeDelta(discard_front_samples, samples_per_second),
222 FramesToTimeDelta(discard_end_samples, samples_per_second))); 250 FramesToTimeDelta(discard_end_samples, samples_per_second)));
223 } 251 }
224 252
225 if (decrypt_config) 253 if (decrypt_config)
226 buffer->set_decrypt_config(decrypt_config.Pass()); 254 buffer->set_decrypt_config(decrypt_config.Pass());
227 } 255 }
228 256
229 buffer->set_timestamp(ConvertStreamTimestamp( 257 buffer->set_duration(
230 stream_->time_base, packet->pts)); 258 ConvertStreamTimestamp(stream_->time_base, packet->duration));
231 buffer->set_duration(ConvertStreamTimestamp( 259
232 stream_->time_base, packet->duration)); 260 // Note: If pts is AV_NOPTS_VALUE, stream_timestamp will be kNoTimestamp().
233 if (buffer->timestamp() != kNoTimestamp() && 261 const base::TimeDelta stream_timestamp =
234 last_packet_timestamp_ != kNoTimestamp() && 262 ConvertStreamTimestamp(stream_->time_base, packet->pts);
235 last_packet_timestamp_ < buffer->timestamp()) { 263
236 buffered_ranges_.Add(last_packet_timestamp_, buffer->timestamp()); 264 if (stream_timestamp != kNoTimestamp()) {
237 demuxer_->NotifyBufferingChanged(); 265 buffer->set_timestamp(stream_timestamp - demuxer_->start_time());
266
267 // If enabled, mark packets with negative timestamps for post-decode
268 // discard.
269 if (discard_negative_timestamps_ && stream_timestamp < base::TimeDelta()) {
270 if (stream_timestamp + buffer->duration() < base::TimeDelta()) {
271 // Discard the entier packet if it's entirely before zero.
acolwell GONE FROM CHROMIUM 2014/06/17 22:58:54 nit: s/entier/entire/
DaleCurtis 2014/06/17 23:04:05 Done.
272 buffer->set_discard_padding(
273 std::make_pair(kInfiniteDuration(), base::TimeDelta()));
274 } else {
275 // Only discard part of the frame if it overlaps zero.
276 buffer->set_discard_padding(
277 std::make_pair(-stream_timestamp, base::TimeDelta()));
278 }
279 }
280
281 if (last_packet_timestamp_ != kNoTimestamp() &&
282 last_packet_timestamp_ < buffer->timestamp()) {
283 buffered_ranges_.Add(last_packet_timestamp_, buffer->timestamp());
284 demuxer_->NotifyBufferingChanged();
285 }
286
287 // The demuxer should always output positive timestamps.
288 DCHECK(buffer->timestamp() >= base::TimeDelta());
289 } else {
290 buffer->set_timestamp(kNoTimestamp());
238 } 291 }
292
293 // TODO(dalecurtis): This allows transitions from <valid ts> -> <no timestamp>
294 // which shouldn't be allowed. See http://crbug.com/384532
239 last_packet_timestamp_ = buffer->timestamp(); 295 last_packet_timestamp_ = buffer->timestamp();
240 296
241 buffer_queue_.Push(buffer); 297 buffer_queue_.Push(buffer);
242 SatisfyPendingRead(); 298 SatisfyPendingRead();
243 } 299 }
244 300
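A worked sketch of the negative-timestamp branch above, with plain microsecond values (the numbers and helper names are hypothetical, not from the CL): an Ogg Vorbis packet at stream pts -6500 us with duration 10000 us, against a demuxer start time of -6500 us, becomes buffer timestamp 0 with 6500 us of front discard, while a packet that ends before zero is marked for full post-decode discard.

#include <cassert>
#include <cstdint>
#include <limits>

// Stand-in for kInfiniteDuration() as the "discard the whole packet" marker.
const int64_t kInfiniteUs = std::numeric_limits<int64_t>::max();

struct EnqueueResult {
  int64_t timestamp_us;      // Externally visible (zero-based) timestamp.
  int64_t front_discard_us;  // Post-decode front discard, 0 if none.
};

// Mirrors the stream_timestamp handling in EnqueuePacket() above.
EnqueueResult HandleNegativeTimestamps(int64_t stream_pts_us,
                                       int64_t duration_us,
                                       int64_t demuxer_start_time_us,
                                       bool discard_negative_timestamps) {
  EnqueueResult result = {stream_pts_us - demuxer_start_time_us, 0};
  if (discard_negative_timestamps && stream_pts_us < 0) {
    if (stream_pts_us + duration_us < 0)
      result.front_discard_us = kInfiniteUs;      // Entirely before zero.
    else
      result.front_discard_us = -stream_pts_us;   // Only the overlap is cut.
  }
  return result;
}

int main() {
  EnqueueResult r = HandleNegativeTimestamps(-6500, 10000, -6500, true);
  assert(r.timestamp_us == 0 && r.front_discard_us == 6500);
  r = HandleNegativeTimestamps(-20000, 10000, -20000, true);
  assert(r.front_discard_us == kInfiniteUs);  // Ends at -10000, fully dropped.
  return 0;
}
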
245 void FFmpegDemuxerStream::SetEndOfStream() { 301 void FFmpegDemuxerStream::SetEndOfStream() {
246 DCHECK(task_runner_->BelongsToCurrentThread()); 302 DCHECK(task_runner_->BelongsToCurrentThread());
247 end_of_stream_ = true; 303 end_of_stream_ = true;
248 SatisfyPendingRead(); 304 SatisfyPendingRead();
(...skipping 12 matching lines...)
261 buffer_queue_.Clear(); 317 buffer_queue_.Clear();
262 if (!read_cb_.is_null()) { 318 if (!read_cb_.is_null()) {
263 base::ResetAndReturn(&read_cb_).Run( 319 base::ResetAndReturn(&read_cb_).Run(
264 DemuxerStream::kOk, DecoderBuffer::CreateEOSBuffer()); 320 DemuxerStream::kOk, DecoderBuffer::CreateEOSBuffer());
265 } 321 }
266 demuxer_ = NULL; 322 demuxer_ = NULL;
267 stream_ = NULL; 323 stream_ = NULL;
268 end_of_stream_ = true; 324 end_of_stream_ = true;
269 } 325 }
270 326
271 base::TimeDelta FFmpegDemuxerStream::duration() {
272 return duration_;
273 }
274
275 DemuxerStream::Type FFmpegDemuxerStream::type() { 327 DemuxerStream::Type FFmpegDemuxerStream::type() {
276 DCHECK(task_runner_->BelongsToCurrentThread()); 328 DCHECK(task_runner_->BelongsToCurrentThread());
277 return type_; 329 return type_;
278 } 330 }
279 331
280 void FFmpegDemuxerStream::Read(const ReadCB& read_cb) { 332 void FFmpegDemuxerStream::Read(const ReadCB& read_cb) {
281 DCHECK(task_runner_->BelongsToCurrentThread()); 333 DCHECK(task_runner_->BelongsToCurrentThread());
282 CHECK(read_cb_.is_null()) << "Overlapping reads are not supported"; 334 CHECK(read_cb_.is_null()) << "Overlapping reads are not supported";
283 read_cb_ = BindToCurrentLoop(read_cb); 335 read_cb_ = BindToCurrentLoop(read_cb);
284 336
(...skipping 124 matching lines...)
409 const scoped_refptr<MediaLog>& media_log) 461 const scoped_refptr<MediaLog>& media_log)
410 : host_(NULL), 462 : host_(NULL),
411 task_runner_(task_runner), 463 task_runner_(task_runner),
412 blocking_thread_("FFmpegDemuxer"), 464 blocking_thread_("FFmpegDemuxer"),
413 pending_read_(false), 465 pending_read_(false),
414 pending_seek_(false), 466 pending_seek_(false),
415 data_source_(data_source), 467 data_source_(data_source),
416 media_log_(media_log), 468 media_log_(media_log),
417 bitrate_(0), 469 bitrate_(0),
418 start_time_(kNoTimestamp()), 470 start_time_(kNoTimestamp()),
471 preferred_stream_for_seeking_(-1, kNoTimestamp()),
472 fallback_stream_for_seeking_(-1, kNoTimestamp()),
419 liveness_(LIVENESS_UNKNOWN), 473 liveness_(LIVENESS_UNKNOWN),
420 text_enabled_(false), 474 text_enabled_(false),
421 duration_known_(false), 475 duration_known_(false),
422 need_key_cb_(need_key_cb), 476 need_key_cb_(need_key_cb),
423 weak_factory_(this) { 477 weak_factory_(this) {
424 DCHECK(task_runner_.get()); 478 DCHECK(task_runner_.get());
425 DCHECK(data_source_); 479 DCHECK(data_source_);
426 } 480 }
427 481
428 FFmpegDemuxer::~FFmpegDemuxer() {} 482 FFmpegDemuxer::~FFmpegDemuxer() {}
429 483
430 void FFmpegDemuxer::Stop(const base::Closure& callback) { 484 void FFmpegDemuxer::Stop(const base::Closure& callback) {
431 DCHECK(task_runner_->BelongsToCurrentThread()); 485 DCHECK(task_runner_->BelongsToCurrentThread());
432 url_protocol_->Abort(); 486 url_protocol_->Abort();
433 data_source_->Stop( 487 data_source_->Stop(
434 BindToCurrentLoop(base::Bind(&FFmpegDemuxer::OnDataSourceStopped, 488 BindToCurrentLoop(base::Bind(&FFmpegDemuxer::OnDataSourceStopped,
435 weak_factory_.GetWeakPtr(), 489 weak_factory_.GetWeakPtr(),
436 BindToCurrentLoop(callback)))); 490 BindToCurrentLoop(callback))));
437 data_source_ = NULL; 491 data_source_ = NULL;
438 } 492 }
439 493
440 void FFmpegDemuxer::Seek(base::TimeDelta time, const PipelineStatusCB& cb) { 494 void FFmpegDemuxer::Seek(base::TimeDelta time, const PipelineStatusCB& cb) {
441 DCHECK(task_runner_->BelongsToCurrentThread()); 495 DCHECK(task_runner_->BelongsToCurrentThread());
442 CHECK(!pending_seek_); 496 CHECK(!pending_seek_);
443 497
444 // TODO(scherkus): Inspect |pending_read_| and cancel IO via |blocking_url_|, 498 // TODO(scherkus): Inspect |pending_read_| and cancel IO via |blocking_url_|,
445 // otherwise we can end up waiting for a pre-seek read to complete even though 499 // otherwise we can end up waiting for a pre-seek read to complete even though
446 // we know we're going to drop it on the floor. 500 // we know we're going to drop it on the floor.
447 501
448 // Always seek to a timestamp less than or equal to the desired timestamp. 502 // FFmpeg requires seeks to be adjusted according to the lowest starting time.
449 int flags = AVSEEK_FLAG_BACKWARD; 503 const base::TimeDelta seek_time = time + start_time_;
450 504
451 // Passing -1 as our stream index lets FFmpeg pick a default stream. FFmpeg 505 // Choose the preferred stream if |seek_time| occurs after its starting time,
452 // will attempt to use the lowest-index video stream, if present, followed by 506 // otherwise use the fallback stream.
453 // the lowest-index audio stream. 507 DCHECK(preferred_stream_for_seeking_.second != kNoTimestamp());
508 const int stream_index = seek_time >= preferred_stream_for_seeking_.second
509 ? preferred_stream_for_seeking_.first
510 : fallback_stream_for_seeking_.first;
511 DCHECK_NE(stream_index, -1);
512
513 const AVStream* seeking_stream =
514 glue_->format_context()->streams[stream_index];
515
454 pending_seek_ = true; 516 pending_seek_ = true;
455 base::PostTaskAndReplyWithResult( 517 base::PostTaskAndReplyWithResult(
456 blocking_thread_.message_loop_proxy().get(), 518 blocking_thread_.message_loop_proxy().get(),
457 FROM_HERE, 519 FROM_HERE,
458 base::Bind(&av_seek_frame, 520 base::Bind(&av_seek_frame,
459 glue_->format_context(), 521 glue_->format_context(),
460 -1, 522 seeking_stream->index,
461 time.InMicroseconds(), 523 ConvertToTimeBase(seeking_stream->time_base, seek_time),
462 flags), 524 // Always seek to a timestamp <= to the desired timestamp.
525 AVSEEK_FLAG_BACKWARD),
463 base::Bind( 526 base::Bind(
464 &FFmpegDemuxer::OnSeekFrameDone, weak_factory_.GetWeakPtr(), cb)); 527 &FFmpegDemuxer::OnSeekFrameDone, weak_factory_.GetWeakPtr(), cb));
465 } 528 }
466 529
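As a concrete illustration of the adjustment above (all numbers are hypothetical): with start_time_ = 1.2 s, a caller seeking to 3 s yields seek_time = 4.2 s, which for a stream with time_base {1, 90000} is passed to av_seek_frame() as 378000 ticks. A standalone sketch of that rescale, ignoring the overflow handling the real ConvertToTimeBase() gets from av_rescale_q():

#include <cassert>
#include <cstdint>

// Microseconds -> stream time_base ticks; time_base is num/den seconds/tick.
int64_t MicrosecondsToTimeBase(int64_t us, int num, int den) {
  return us * den / (num * INT64_C(1000000));
}

int main() {
  const int64_t start_time_us = 1200000;       // Lowest stream start time.
  const int64_t requested_seek_us = 3000000;   // Externally visible seek target.
  const int64_t seek_time_us = requested_seek_us + start_time_us;
  assert(MicrosecondsToTimeBase(seek_time_us, 1, 90000) == 378000);
  return 0;
}
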
467 void FFmpegDemuxer::Initialize(DemuxerHost* host, 530 void FFmpegDemuxer::Initialize(DemuxerHost* host,
468 const PipelineStatusCB& status_cb, 531 const PipelineStatusCB& status_cb,
469 bool enable_text_tracks) { 532 bool enable_text_tracks) {
470 DCHECK(task_runner_->BelongsToCurrentThread()); 533 DCHECK(task_runner_->BelongsToCurrentThread());
471 host_ = host; 534 host_ = host;
472 text_enabled_ = enable_text_tracks; 535 text_enabled_ = enable_text_tracks;
(...skipping 28 matching lines...)
501 DemuxerStream::Type type) const { 564 DemuxerStream::Type type) const {
502 StreamVector::const_iterator iter; 565 StreamVector::const_iterator iter;
503 for (iter = streams_.begin(); iter != streams_.end(); ++iter) { 566 for (iter = streams_.begin(); iter != streams_.end(); ++iter) {
504 if (*iter && (*iter)->type() == type) { 567 if (*iter && (*iter)->type() == type) {
505 return *iter; 568 return *iter;
506 } 569 }
507 } 570 }
508 return NULL; 571 return NULL;
509 } 572 }
510 573
511 base::TimeDelta FFmpegDemuxer::GetStartTime() const {
512 DCHECK(task_runner_->BelongsToCurrentThread());
513 return start_time_;
514 }
515
516 base::Time FFmpegDemuxer::GetTimelineOffset() const { 574 base::Time FFmpegDemuxer::GetTimelineOffset() const {
517 return timeline_offset_; 575 return timeline_offset_;
518 } 576 }
519 577
520 Demuxer::Liveness FFmpegDemuxer::GetLiveness() const { 578 Demuxer::Liveness FFmpegDemuxer::GetLiveness() const {
521 DCHECK(task_runner_->BelongsToCurrentThread()); 579 DCHECK(task_runner_->BelongsToCurrentThread());
522 return liveness_; 580 return liveness_;
523 } 581 }
524 582
525 void FFmpegDemuxer::AddTextStreams() { 583 void FFmpegDemuxer::AddTextStreams() {
(...skipping 89 matching lines...)
615 return; 673 return;
616 } 674 }
617 675
618 // Create demuxer stream entries for each possible AVStream. Each stream 676 // Create demuxer stream entries for each possible AVStream. Each stream
619 // is examined to determine if it is supported or not (is the codec enabled 677 // is examined to determine if it is supported or not (is the codec enabled
620 // for it in this release?). Unsupported streams are skipped, allowing for 678 // for it in this release?). Unsupported streams are skipped, allowing for
621 // partial playback. At least one audio or video stream must be playable. 679 // partial playback. At least one audio or video stream must be playable.
622 AVFormatContext* format_context = glue_->format_context(); 680 AVFormatContext* format_context = glue_->format_context();
623 streams_.resize(format_context->nb_streams); 681 streams_.resize(format_context->nb_streams);
624 682
683 // Estimate the start time for each stream by looking through the packets
684 // buffered during avformat_find_stream_info(). These values will be
685 // considered later when determining the actual stream start time.
686 //
687 // These packets haven't been completely processed yet, so only look through
688 // these values if the AVFormatContext has a valid start time.
689 //
690 // If no estimate is found, the stream entry will be kInfiniteDuration().
691 std::vector<base::TimeDelta> start_time_estimates(format_context->nb_streams,
692 kInfiniteDuration());
693 if (format_context->packet_buffer &&
694 format_context->start_time != static_cast<int64>(AV_NOPTS_VALUE)) {
695 struct AVPacketList* packet_buffer = format_context->packet_buffer;
696 while (packet_buffer != format_context->packet_buffer_end) {
697 DCHECK_LT(static_cast<size_t>(packet_buffer->pkt.stream_index),
698 start_time_estimates.size());
699 const AVStream* stream =
700 format_context->streams[packet_buffer->pkt.stream_index];
701 if (packet_buffer->pkt.pts != static_cast<int64>(AV_NOPTS_VALUE)) {
702 const base::TimeDelta packet_pts =
703 ConvertFromTimeBase(stream->time_base, packet_buffer->pkt.pts);
704 if (packet_pts < start_time_estimates[stream->index])
705 start_time_estimates[stream->index] = packet_pts;
706 }
707 packet_buffer = packet_buffer->next;
708 }
709 }
710
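The scan above is just a per-stream minimum over the packets avformat_find_stream_info() already buffered; a simplified standalone sketch (no AVPacketList, hypothetical types) for reference:

#include <cstddef>
#include <cstdint>
#include <limits>
#include <vector>

struct BufferedPacket {
  std::size_t stream_index;
  int64_t pts_us;  // Packets without a pts would simply be skipped.
};

// Lowest buffered pts per stream, or "infinite" if none was seen.
std::vector<int64_t> EstimateStartTimes(
    std::size_t stream_count, const std::vector<BufferedPacket>& packets) {
  std::vector<int64_t> estimates(stream_count,
                                 std::numeric_limits<int64_t>::max());
  for (std::size_t i = 0; i < packets.size(); ++i) {
    const BufferedPacket& packet = packets[i];
    if (packet.pts_us < estimates[packet.stream_index])
      estimates[packet.stream_index] = packet.pts_us;
  }
  return estimates;
}
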
625 AVStream* audio_stream = NULL; 711 AVStream* audio_stream = NULL;
626 AudioDecoderConfig audio_config; 712 AudioDecoderConfig audio_config;
627 713
628 AVStream* video_stream = NULL; 714 AVStream* video_stream = NULL;
629 VideoDecoderConfig video_config; 715 VideoDecoderConfig video_config;
630 716
717 // If available, |start_time_| will be set to the lowest stream start time.
718 start_time_ = kInfiniteDuration();
719
631 base::TimeDelta max_duration; 720 base::TimeDelta max_duration;
632 for (size_t i = 0; i < format_context->nb_streams; ++i) { 721 for (size_t i = 0; i < format_context->nb_streams; ++i) {
633 AVStream* stream = format_context->streams[i]; 722 AVStream* stream = format_context->streams[i];
634 AVCodecContext* codec_context = stream->codec; 723 const AVCodecContext* codec_context = stream->codec;
635 AVMediaType codec_type = codec_context->codec_type; 724 const AVMediaType codec_type = codec_context->codec_type;
725 bool discard_negative_timestamps = false;
636 726
637 if (codec_type == AVMEDIA_TYPE_AUDIO) { 727 if (codec_type == AVMEDIA_TYPE_AUDIO) {
638 if (audio_stream) 728 if (audio_stream)
639 continue; 729 continue;
640 730
641 // Log the codec detected, whether it is supported or not. 731 // Log the codec detected, whether it is supported or not.
642 UMA_HISTOGRAM_SPARSE_SLOWLY("Media.DetectedAudioCodec", 732 UMA_HISTOGRAM_SPARSE_SLOWLY("Media.DetectedAudioCodec",
643 codec_context->codec_id); 733 codec_context->codec_id);
644 // Ensure the codec is supported. IsValidConfig() also checks that the 734 // Ensure the codec is supported. IsValidConfig() also checks that the
645 // channel layout and sample format are valid. 735 // channel layout and sample format are valid.
646 AVStreamToAudioDecoderConfig(stream, &audio_config, false); 736 AVStreamToAudioDecoderConfig(stream, &audio_config, false);
647 if (!audio_config.IsValidConfig()) 737 if (!audio_config.IsValidConfig())
648 continue; 738 continue;
649 audio_stream = stream; 739 audio_stream = stream;
740
741 // Enable post-decode frame dropping for packets with negative timestamps
742 // as outlined in section A.2 in the Ogg Vorbis spec:
743 // http://xiph.org/vorbis/doc/Vorbis_I_spec.html
744 discard_negative_timestamps =
745 audio_config.codec() == kCodecVorbis &&
746 strcmp(glue_->format_context()->iformat->name, "ogg") == 0;
650 } else if (codec_type == AVMEDIA_TYPE_VIDEO) { 747 } else if (codec_type == AVMEDIA_TYPE_VIDEO) {
651 if (video_stream) 748 if (video_stream)
652 continue; 749 continue;
653 750
654 // Log the codec detected, whether it is supported or not. 751 // Log the codec detected, whether it is supported or not.
655 UMA_HISTOGRAM_SPARSE_SLOWLY("Media.DetectedVideoCodec", 752 UMA_HISTOGRAM_SPARSE_SLOWLY("Media.DetectedVideoCodec",
656 codec_context->codec_id); 753 codec_context->codec_id);
657 // Ensure the codec is supported. IsValidConfig() also checks that the 754 // Ensure the codec is supported. IsValidConfig() also checks that the
658 // frame size and visible size are valid. 755 // frame size and visible size are valid.
659 AVStreamToVideoDecoderConfig(stream, &video_config, false); 756 AVStreamToVideoDecoderConfig(stream, &video_config, false);
660 757
661 if (!video_config.IsValidConfig()) 758 if (!video_config.IsValidConfig())
662 continue; 759 continue;
663 video_stream = stream; 760 video_stream = stream;
664 } else if (codec_type == AVMEDIA_TYPE_SUBTITLE) { 761 } else if (codec_type == AVMEDIA_TYPE_SUBTITLE) {
665 if (codec_context->codec_id != AV_CODEC_ID_WEBVTT || !text_enabled_) { 762 if (codec_context->codec_id != AV_CODEC_ID_WEBVTT || !text_enabled_) {
666 continue; 763 continue;
667 } 764 }
668 } else { 765 } else {
669 continue; 766 continue;
670 } 767 }
671 768
672 streams_[i] = new FFmpegDemuxerStream(this, stream); 769 streams_[i] =
770 new FFmpegDemuxerStream(this, stream, discard_negative_timestamps);
673 max_duration = std::max(max_duration, streams_[i]->duration()); 771 max_duration = std::max(max_duration, streams_[i]->duration());
674 772
675 if (stream->first_dts != static_cast<int64_t>(AV_NOPTS_VALUE)) { 773 const base::TimeDelta start_time =
676 const base::TimeDelta first_dts = ConvertFromTimeBase( 774 ExtractStartTime(stream, start_time_estimates[i]);
677 stream->time_base, stream->first_dts); 775 if (start_time == kNoTimestamp())
678 if (start_time_ == kNoTimestamp() || first_dts < start_time_) 776 continue;
679 start_time_ = first_dts; 777
778 if (start_time < start_time_) {
779 start_time_ = start_time;
780
781 // Choose the stream with the lowest starting time as the fallback stream
782 // for seeking. Video should always be preferred.
783 fallback_stream_for_seeking_ = std::make_pair(i, start_time);
680 } 784 }
785
786 // Always prefer the video stream for seeking. If none exists, we'll swap
787 // the fallback stream with the preferred stream below.
788 if (codec_type == AVMEDIA_TYPE_VIDEO)
789 preferred_stream_for_seeking_ = std::make_pair(i, start_time);
681 } 790 }
682 791
683 if (!audio_stream && !video_stream) { 792 if (!audio_stream && !video_stream) {
684 status_cb.Run(DEMUXER_ERROR_NO_SUPPORTED_STREAMS); 793 status_cb.Run(DEMUXER_ERROR_NO_SUPPORTED_STREAMS);
685 return; 794 return;
686 } 795 }
687 796
688 if (text_enabled_) 797 if (text_enabled_)
689 AddTextStreams(); 798 AddTextStreams();
690 799
691 if (format_context->duration != static_cast<int64_t>(AV_NOPTS_VALUE)) { 800 if (format_context->duration != static_cast<int64_t>(AV_NOPTS_VALUE)) {
692 // If there is a duration value in the container use that to find the 801 // If there is a duration value in the container use that to find the
693 // maximum between it and the duration from A/V streams. 802 // maximum between it and the duration from A/V streams.
694 const AVRational av_time_base = {1, AV_TIME_BASE}; 803 const AVRational av_time_base = {1, AV_TIME_BASE};
695 max_duration = 804 max_duration =
696 std::max(max_duration, 805 std::max(max_duration,
697 ConvertFromTimeBase(av_time_base, format_context->duration)); 806 ConvertFromTimeBase(av_time_base, format_context->duration));
698 } else { 807 } else {
699 // The duration is unknown, in which case this is likely a live stream. 808 // The duration is unknown, in which case this is likely a live stream.
700 max_duration = kInfiniteDuration(); 809 max_duration = kInfiniteDuration();
701 } 810 }
702 811
703 // Some demuxers, like WAV, do not put timestamps on their frames. We 812 // If no start time could be determined, default to zero and prefer the video
704 // assume the the start time is 0. 813 // stream over the audio stream for seeking. E.g., The WAV demuxer does not
705 if (start_time_ == kNoTimestamp()) 814 // put timestamps on its frames.
815 if (start_time_ == kInfiniteDuration()) {
706 start_time_ = base::TimeDelta(); 816 start_time_ = base::TimeDelta();
817 preferred_stream_for_seeking_ = std::make_pair(
818 video_stream ? video_stream->index : audio_stream->index, start_time_);
819 } else if (!video_stream) {
820 // If no video stream exists, use the audio or text stream found above.
821 preferred_stream_for_seeking_ = fallback_stream_for_seeking_;
822 }
707 823
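Condensing the selection spread across the stream loop above and the fallback handling just before this point, a simplified sketch (types and data are hypothetical): the fallback stream is the one with the lowest known start time, the preferred stream is the video stream if one exists, and otherwise the fallback is promoted; the no-start-time-at-all default to zero is omitted here for brevity.

#include <cstddef>
#include <cstdint>
#include <limits>
#include <utility>
#include <vector>

enum StreamType { AUDIO, VIDEO };

struct StreamInfo {
  StreamType type;
  int64_t start_time_us;  // std::numeric_limits<int64_t>::max() if unknown.
};

// Returns {preferred_index, fallback_index} for seeking, in simplified form.
std::pair<int, int> ChooseSeekStreams(const std::vector<StreamInfo>& streams) {
  int64_t lowest_start = std::numeric_limits<int64_t>::max();
  int preferred = -1;
  int fallback = -1;
  for (std::size_t i = 0; i < streams.size(); ++i) {
    if (streams[i].start_time_us < lowest_start) {
      lowest_start = streams[i].start_time_us;
      fallback = static_cast<int>(i);
    }
    if (streams[i].type == VIDEO && preferred == -1)
      preferred = static_cast<int>(i);
  }
  if (preferred == -1)
    preferred = fallback;  // No video stream: seek on the lowest-start stream.
  return std::make_pair(preferred, fallback);
}
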
708 // MPEG-4 B-frames cause grief for a simple container like AVI. Enable PTS 824 // MPEG-4 B-frames cause grief for a simple container like AVI. Enable PTS
709 // generation so we always get timestamps, see http://crbug.com/169570 825 // generation so we always get timestamps, see http://crbug.com/169570
710 if (strcmp(format_context->iformat->name, "avi") == 0) 826 if (strcmp(format_context->iformat->name, "avi") == 0)
711 format_context->flags |= AVFMT_FLAG_GENPTS; 827 format_context->flags |= AVFMT_FLAG_GENPTS;
712 828
713 timeline_offset_ = ExtractTimelineOffset(format_context); 829 // For testing purposes, don't overwrite the timeline offset if set already.
830 if (timeline_offset_.is_null())
831 timeline_offset_ = ExtractTimelineOffset(format_context);
832
833 // Since we're shifting the externally visible start time to zero, we need to
834 // adjust the timeline offset to compensate.
835 if (!timeline_offset_.is_null())
836 timeline_offset_ += start_time_;
714 837
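A tiny numeric illustration of the compensation above (the offset value is made up): if the container reports a timeline offset of T and the lowest stream start time is 1.2 s, then once the visible timeline is shifted so playback starts at zero, visible time 0 corresponds to T + 1.2 s, which is what the += above stores.

#include <cassert>
#include <cstdint>

int main() {
  // Hypothetical values, in microseconds since some epoch.
  int64_t timeline_offset_us = 1402999200000000;  // Container's reported offset.
  const int64_t start_time_us = 1200000;          // Lowest stream start, 1.2 s.
  timeline_offset_us += start_time_us;            // Visible t=0 now maps here.
  assert(timeline_offset_us == 1402999201200000);
  return 0;
}
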
715 if (max_duration == kInfiniteDuration() && !timeline_offset_.is_null()) { 838 if (max_duration == kInfiniteDuration() && !timeline_offset_.is_null()) {
716 liveness_ = LIVENESS_LIVE; 839 liveness_ = LIVENESS_LIVE;
717 } else if (max_duration != kInfiniteDuration()) { 840 } else if (max_duration != kInfiniteDuration()) {
718 liveness_ = LIVENESS_RECORDED; 841 liveness_ = LIVENESS_RECORDED;
719 } else { 842 } else {
720 liveness_ = LIVENESS_UNKNOWN; 843 liveness_ = LIVENESS_UNKNOWN;
721 } 844 }
722 845
723 // Good to go: set the duration and bitrate and notify we're done 846 // Good to go: set the duration and bitrate and notify we're done
(...skipping 52 matching lines...)
776 video_codec->time_base.num, 899 video_codec->time_base.num,
777 video_codec->time_base.den)); 900 video_codec->time_base.den));
778 media_log_->SetStringProperty( 901 media_log_->SetStringProperty(
779 "video_format", VideoFrame::FormatToString(video_config.format())); 902 "video_format", VideoFrame::FormatToString(video_config.format()));
780 media_log_->SetBooleanProperty("video_is_encrypted", 903 media_log_->SetBooleanProperty("video_is_encrypted",
781 video_config.is_encrypted()); 904 video_config.is_encrypted());
782 } else { 905 } else {
783 media_log_->SetBooleanProperty("found_video_stream", false); 906 media_log_->SetBooleanProperty("found_video_stream", false);
784 } 907 }
785 908
786
787 media_log_->SetTimeProperty("max_duration", max_duration); 909 media_log_->SetTimeProperty("max_duration", max_duration);
788 media_log_->SetTimeProperty("start_time", start_time_); 910 media_log_->SetTimeProperty("start_time", start_time_);
789 media_log_->SetIntegerProperty("bitrate", bitrate_); 911 media_log_->SetIntegerProperty("bitrate", bitrate_);
790 912
791 status_cb.Run(PIPELINE_OK); 913 status_cb.Run(PIPELINE_OK);
792 } 914 }
793 915
794 void FFmpegDemuxer::OnSeekFrameDone(const PipelineStatusCB& cb, int result) { 916 void FFmpegDemuxer::OnSeekFrameDone(const PipelineStatusCB& cb, int result) {
795 DCHECK(task_runner_->BelongsToCurrentThread()); 917 DCHECK(task_runner_->BelongsToCurrentThread());
796 CHECK(pending_seek_); 918 CHECK(pending_seek_);
(...skipping 215 matching lines...)
1012 } 1134 }
1013 for (size_t i = 0; i < buffered.size(); ++i) 1135 for (size_t i = 0; i < buffered.size(); ++i)
1014 host_->AddBufferedTimeRange(buffered.start(i), buffered.end(i)); 1136 host_->AddBufferedTimeRange(buffered.start(i), buffered.end(i));
1015 } 1137 }
1016 1138
1017 void FFmpegDemuxer::OnDataSourceError() { 1139 void FFmpegDemuxer::OnDataSourceError() {
1018 host_->OnDemuxerError(PIPELINE_ERROR_READ); 1140 host_->OnDemuxerError(PIPELINE_ERROR_READ);
1019 } 1141 }
1020 1142
1021 } // namespace media 1143 } // namespace media