OLD | NEW |
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "media/filters/ffmpeg_demuxer.h" | 5 #include "media/filters/ffmpeg_demuxer.h" |
6 | 6 |
7 #include <algorithm> | 7 #include <algorithm> |
8 #include <string> | 8 #include <string> |
9 | 9 |
10 #include "base/base64.h" | 10 #include "base/base64.h" |
(...skipping 37 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
48 } | 48 } |
49 | 49 |
50 return base::Time(); | 50 return base::Time(); |
51 } | 51 } |
52 | 52 |
53 static base::TimeDelta FramesToTimeDelta(int frames, double sample_rate) { | 53 static base::TimeDelta FramesToTimeDelta(int frames, double sample_rate) { |
54 return base::TimeDelta::FromMicroseconds( | 54 return base::TimeDelta::FromMicroseconds( |
55 frames * base::Time::kMicrosecondsPerSecond / sample_rate); | 55 frames * base::Time::kMicrosecondsPerSecond / sample_rate); |
56 } | 56 } |
57 | 57 |
58 static base::TimeDelta ExtractStartTime(AVStream* stream) { | |
59 if (stream->start_time == static_cast<int64_t>(AV_NOPTS_VALUE)) | |
60 return kNoTimestamp(); | |
61 | |
62 // First try to use the |start_time| value directly. | |
63 const base::TimeDelta start_time = | |
64 ConvertFromTimeBase(stream->time_base, stream->start_time); | |
65 | |
66 // Then compare against the first timestamp to see if adjustment is required. | |
67 if (stream->first_dts == static_cast<int64_t>(AV_NOPTS_VALUE)) | |
68 return start_time; | |
69 | |
70 const base::TimeDelta first_dts = | |
71 ConvertFromTimeBase(stream->time_base, stream->first_dts); | |
72 | |
73 return first_dts < start_time ? first_dts : start_time; | |
74 } | |
75 | |
76 // | 58 // |
77 // FFmpegDemuxerStream | 59 // FFmpegDemuxerStream |
78 // | 60 // |
79 FFmpegDemuxerStream::FFmpegDemuxerStream( | 61 FFmpegDemuxerStream::FFmpegDemuxerStream( |
80 FFmpegDemuxer* demuxer, | 62 FFmpegDemuxer* demuxer, |
81 AVStream* stream, | 63 AVStream* stream) |
82 bool discard_negative_timestamps) | |
83 : demuxer_(demuxer), | 64 : demuxer_(demuxer), |
84 task_runner_(base::MessageLoopProxy::current()), | 65 task_runner_(base::MessageLoopProxy::current()), |
85 stream_(stream), | 66 stream_(stream), |
86 type_(UNKNOWN), | 67 type_(UNKNOWN), |
87 end_of_stream_(false), | 68 end_of_stream_(false), |
88 last_packet_timestamp_(kNoTimestamp()), | 69 last_packet_timestamp_(kNoTimestamp()), |
89 bitstream_converter_enabled_(false), | 70 bitstream_converter_enabled_(false) { |
90 discard_negative_timestamps_(discard_negative_timestamps) { | |
91 DCHECK(demuxer_); | 71 DCHECK(demuxer_); |
92 | 72 |
93 bool is_encrypted = false; | 73 bool is_encrypted = false; |
94 | 74 |
95 // Determine our media format. | 75 // Determine our media format. |
96 switch (stream->codec->codec_type) { | 76 switch (stream->codec->codec_type) { |
97 case AVMEDIA_TYPE_AUDIO: | 77 case AVMEDIA_TYPE_AUDIO: |
98 type_ = AUDIO; | 78 type_ = AUDIO; |
99 AVStreamToAudioDecoderConfig(stream, &audio_config_, true); | 79 AVStreamToAudioDecoderConfig(stream, &audio_config_, true); |
100 is_encrypted = audio_config_.is_encrypted(); | 80 is_encrypted = audio_config_.is_encrypted(); |
(...skipping 138 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
239 audio_decoder_config().samples_per_second(); | 219 audio_decoder_config().samples_per_second(); |
240 buffer->set_discard_padding(std::make_pair( | 220 buffer->set_discard_padding(std::make_pair( |
241 FramesToTimeDelta(discard_front_samples, samples_per_second), | 221 FramesToTimeDelta(discard_front_samples, samples_per_second), |
242 FramesToTimeDelta(discard_end_samples, samples_per_second))); | 222 FramesToTimeDelta(discard_end_samples, samples_per_second))); |
243 } | 223 } |
244 | 224 |
245 if (decrypt_config) | 225 if (decrypt_config) |
246 buffer->set_decrypt_config(decrypt_config.Pass()); | 226 buffer->set_decrypt_config(decrypt_config.Pass()); |
247 } | 227 } |
248 | 228 |
249 buffer->set_duration( | 229 buffer->set_timestamp(ConvertStreamTimestamp( |
250 ConvertStreamTimestamp(stream_->time_base, packet->duration)); | 230 stream_->time_base, packet->pts)); |
251 | 231 buffer->set_duration(ConvertStreamTimestamp( |
252 // Note: If pts is AV_NOPTS_VALUE, stream_timestamp will be kNoTimestamp(). | 232 stream_->time_base, packet->duration)); |
253 const base::TimeDelta stream_timestamp = | 233 if (buffer->timestamp() != kNoTimestamp() && |
254 ConvertStreamTimestamp(stream_->time_base, packet->pts); | 234 last_packet_timestamp_ != kNoTimestamp() && |
255 | 235 last_packet_timestamp_ < buffer->timestamp()) { |
256 if (stream_timestamp != kNoTimestamp()) { | 236 buffered_ranges_.Add(last_packet_timestamp_, buffer->timestamp()); |
257 buffer->set_timestamp(stream_timestamp - demuxer_->start_time()); | 237 demuxer_->NotifyBufferingChanged(); |
258 | |
259 // If enabled, mark packets with negative timestamps for post-decode | |
260 // discard. | |
261 if (discard_negative_timestamps_ && stream_timestamp < base::TimeDelta()) { | |
262 if (stream_timestamp + buffer->duration() < base::TimeDelta()) { | |
263 // Discard the entire packet if it's entirely before zero. | |
264 buffer->set_discard_padding( | |
265 std::make_pair(kInfiniteDuration(), base::TimeDelta())); | |
266 } else { | |
267 // Only discard part of the frame if it overlaps zero. | |
268 buffer->set_discard_padding( | |
269 std::make_pair(-stream_timestamp, base::TimeDelta())); | |
270 } | |
271 } | |
272 | |
273 if (last_packet_timestamp_ != kNoTimestamp() && | |
274 last_packet_timestamp_ < buffer->timestamp()) { | |
275 buffered_ranges_.Add(last_packet_timestamp_, buffer->timestamp()); | |
276 demuxer_->NotifyBufferingChanged(); | |
277 } | |
278 | |
279 // The demuxer should always output positive timestamps. | |
280 DCHECK(buffer->timestamp() >= base::TimeDelta()); | |
281 } else { | |
282 buffer->set_timestamp(kNoTimestamp()); | |
283 } | 238 } |
284 | |
285 // TODO(dalecurtis): This allows transitions from <valid ts> -> <no timestamp> | |
286 // which shouldn't be allowed. See http://crbug.com/384532 | |
287 last_packet_timestamp_ = buffer->timestamp(); | 239 last_packet_timestamp_ = buffer->timestamp(); |
288 | 240 |
289 buffer_queue_.Push(buffer); | 241 buffer_queue_.Push(buffer); |
290 SatisfyPendingRead(); | 242 SatisfyPendingRead(); |
291 } | 243 } |
292 | 244 |
293 void FFmpegDemuxerStream::SetEndOfStream() { | 245 void FFmpegDemuxerStream::SetEndOfStream() { |
294 DCHECK(task_runner_->BelongsToCurrentThread()); | 246 DCHECK(task_runner_->BelongsToCurrentThread()); |
295 end_of_stream_ = true; | 247 end_of_stream_ = true; |
296 SatisfyPendingRead(); | 248 SatisfyPendingRead(); |
(...skipping 12 matching lines...) Expand all Loading... |
309 buffer_queue_.Clear(); | 261 buffer_queue_.Clear(); |
310 if (!read_cb_.is_null()) { | 262 if (!read_cb_.is_null()) { |
311 base::ResetAndReturn(&read_cb_).Run( | 263 base::ResetAndReturn(&read_cb_).Run( |
312 DemuxerStream::kOk, DecoderBuffer::CreateEOSBuffer()); | 264 DemuxerStream::kOk, DecoderBuffer::CreateEOSBuffer()); |
313 } | 265 } |
314 demuxer_ = NULL; | 266 demuxer_ = NULL; |
315 stream_ = NULL; | 267 stream_ = NULL; |
316 end_of_stream_ = true; | 268 end_of_stream_ = true; |
317 } | 269 } |
318 | 270 |
| 271 base::TimeDelta FFmpegDemuxerStream::duration() { |
| 272 return duration_; |
| 273 } |
| 274 |
319 DemuxerStream::Type FFmpegDemuxerStream::type() { | 275 DemuxerStream::Type FFmpegDemuxerStream::type() { |
320 DCHECK(task_runner_->BelongsToCurrentThread()); | 276 DCHECK(task_runner_->BelongsToCurrentThread()); |
321 return type_; | 277 return type_; |
322 } | 278 } |
323 | 279 |
324 void FFmpegDemuxerStream::Read(const ReadCB& read_cb) { | 280 void FFmpegDemuxerStream::Read(const ReadCB& read_cb) { |
325 DCHECK(task_runner_->BelongsToCurrentThread()); | 281 DCHECK(task_runner_->BelongsToCurrentThread()); |
326 CHECK(read_cb_.is_null()) << "Overlapping reads are not supported"; | 282 CHECK(read_cb_.is_null()) << "Overlapping reads are not supported"; |
327 read_cb_ = BindToCurrentLoop(read_cb); | 283 read_cb_ = BindToCurrentLoop(read_cb); |
328 | 284 |
(...skipping 128 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
457 pending_read_(false), | 413 pending_read_(false), |
458 pending_seek_(false), | 414 pending_seek_(false), |
459 data_source_(data_source), | 415 data_source_(data_source), |
460 media_log_(media_log), | 416 media_log_(media_log), |
461 bitrate_(0), | 417 bitrate_(0), |
462 start_time_(kNoTimestamp()), | 418 start_time_(kNoTimestamp()), |
463 liveness_(LIVENESS_UNKNOWN), | 419 liveness_(LIVENESS_UNKNOWN), |
464 text_enabled_(false), | 420 text_enabled_(false), |
465 duration_known_(false), | 421 duration_known_(false), |
466 need_key_cb_(need_key_cb), | 422 need_key_cb_(need_key_cb), |
467 stream_index_for_seeking_(0), | |
468 weak_factory_(this) { | 423 weak_factory_(this) { |
469 DCHECK(task_runner_.get()); | 424 DCHECK(task_runner_.get()); |
470 DCHECK(data_source_); | 425 DCHECK(data_source_); |
471 } | 426 } |
472 | 427 |
473 FFmpegDemuxer::~FFmpegDemuxer() {} | 428 FFmpegDemuxer::~FFmpegDemuxer() {} |
474 | 429 |
475 void FFmpegDemuxer::Stop(const base::Closure& callback) { | 430 void FFmpegDemuxer::Stop(const base::Closure& callback) { |
476 DCHECK(task_runner_->BelongsToCurrentThread()); | 431 DCHECK(task_runner_->BelongsToCurrentThread()); |
477 url_protocol_->Abort(); | 432 url_protocol_->Abort(); |
478 data_source_->Stop( | 433 data_source_->Stop( |
479 BindToCurrentLoop(base::Bind(&FFmpegDemuxer::OnDataSourceStopped, | 434 BindToCurrentLoop(base::Bind(&FFmpegDemuxer::OnDataSourceStopped, |
480 weak_factory_.GetWeakPtr(), | 435 weak_factory_.GetWeakPtr(), |
481 BindToCurrentLoop(callback)))); | 436 BindToCurrentLoop(callback)))); |
482 data_source_ = NULL; | 437 data_source_ = NULL; |
483 } | 438 } |
484 | 439 |
485 void FFmpegDemuxer::Seek(base::TimeDelta time, const PipelineStatusCB& cb) { | 440 void FFmpegDemuxer::Seek(base::TimeDelta time, const PipelineStatusCB& cb) { |
486 DCHECK(task_runner_->BelongsToCurrentThread()); | 441 DCHECK(task_runner_->BelongsToCurrentThread()); |
487 CHECK(!pending_seek_); | 442 CHECK(!pending_seek_); |
488 | 443 |
489 // TODO(scherkus): Inspect |pending_read_| and cancel IO via |blocking_url_|, | 444 // TODO(scherkus): Inspect |pending_read_| and cancel IO via |blocking_url_|, |
490 // otherwise we can end up waiting for a pre-seek read to complete even though | 445 // otherwise we can end up waiting for a pre-seek read to complete even though |
491 // we know we're going to drop it on the floor. | 446 // we know we're going to drop it on the floor. |
492 | 447 |
493 const AVStream* seeking_stream = | 448 // Always seek to a timestamp less than or equal to the desired timestamp. |
494 glue_->format_context()->streams[stream_index_for_seeking_]; | 449 int flags = AVSEEK_FLAG_BACKWARD; |
495 | 450 |
| 451 // Passing -1 as our stream index lets FFmpeg pick a default stream. FFmpeg |
| 452 // will attempt to use the lowest-index video stream, if present, followed by |
| 453 // the lowest-index audio stream. |
496 pending_seek_ = true; | 454 pending_seek_ = true; |
497 base::PostTaskAndReplyWithResult( | 455 base::PostTaskAndReplyWithResult( |
498 blocking_thread_.message_loop_proxy().get(), | 456 blocking_thread_.message_loop_proxy().get(), |
499 FROM_HERE, | 457 FROM_HERE, |
500 base::Bind( | 458 base::Bind(&av_seek_frame, |
501 &av_seek_frame, | 459 glue_->format_context(), |
502 glue_->format_context(), | 460 -1, |
503 stream_index_for_seeking_, | 461 time.InMicroseconds(), |
504 ConvertToTimeBase(seeking_stream->time_base, time + start_time()), | 462 flags), |
505 // Always seek to a timestamp <= to the desired timestamp. | |
506 AVSEEK_FLAG_BACKWARD), | |
507 base::Bind( | 463 base::Bind( |
508 &FFmpegDemuxer::OnSeekFrameDone, weak_factory_.GetWeakPtr(), cb)); | 464 &FFmpegDemuxer::OnSeekFrameDone, weak_factory_.GetWeakPtr(), cb)); |
509 } | 465 } |
510 | 466 |
511 void FFmpegDemuxer::Initialize(DemuxerHost* host, | 467 void FFmpegDemuxer::Initialize(DemuxerHost* host, |
512 const PipelineStatusCB& status_cb, | 468 const PipelineStatusCB& status_cb, |
513 bool enable_text_tracks) { | 469 bool enable_text_tracks) { |
514 DCHECK(task_runner_->BelongsToCurrentThread()); | 470 DCHECK(task_runner_->BelongsToCurrentThread()); |
515 host_ = host; | 471 host_ = host; |
516 text_enabled_ = enable_text_tracks; | 472 text_enabled_ = enable_text_tracks; |
(...skipping 28 matching lines...) Expand all Loading... |
545 DemuxerStream::Type type) const { | 501 DemuxerStream::Type type) const { |
546 StreamVector::const_iterator iter; | 502 StreamVector::const_iterator iter; |
547 for (iter = streams_.begin(); iter != streams_.end(); ++iter) { | 503 for (iter = streams_.begin(); iter != streams_.end(); ++iter) { |
548 if (*iter && (*iter)->type() == type) { | 504 if (*iter && (*iter)->type() == type) { |
549 return *iter; | 505 return *iter; |
550 } | 506 } |
551 } | 507 } |
552 return NULL; | 508 return NULL; |
553 } | 509 } |
554 | 510 |
| 511 base::TimeDelta FFmpegDemuxer::GetStartTime() const { |
| 512 DCHECK(task_runner_->BelongsToCurrentThread()); |
| 513 return start_time_; |
| 514 } |
| 515 |
555 base::Time FFmpegDemuxer::GetTimelineOffset() const { | 516 base::Time FFmpegDemuxer::GetTimelineOffset() const { |
556 return timeline_offset_; | 517 return timeline_offset_; |
557 } | 518 } |
558 | 519 |
559 Demuxer::Liveness FFmpegDemuxer::GetLiveness() const { | 520 Demuxer::Liveness FFmpegDemuxer::GetLiveness() const { |
560 DCHECK(task_runner_->BelongsToCurrentThread()); | 521 DCHECK(task_runner_->BelongsToCurrentThread()); |
561 return liveness_; | 522 return liveness_; |
562 } | 523 } |
563 | 524 |
564 void FFmpegDemuxer::AddTextStreams() { | 525 void FFmpegDemuxer::AddTextStreams() { |
(...skipping 95 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
660 // partial playback. At least one audio or video stream must be playable. | 621 // partial playback. At least one audio or video stream must be playable. |
661 AVFormatContext* format_context = glue_->format_context(); | 622 AVFormatContext* format_context = glue_->format_context(); |
662 streams_.resize(format_context->nb_streams); | 623 streams_.resize(format_context->nb_streams); |
663 | 624 |
664 AVStream* audio_stream = NULL; | 625 AVStream* audio_stream = NULL; |
665 AudioDecoderConfig audio_config; | 626 AudioDecoderConfig audio_config; |
666 | 627 |
667 AVStream* video_stream = NULL; | 628 AVStream* video_stream = NULL; |
668 VideoDecoderConfig video_config; | 629 VideoDecoderConfig video_config; |
669 | 630 |
670 // If available, |start_time_| will be set to the lowest stream start time. | |
671 start_time_ = kInfiniteDuration(); | |
672 | |
673 base::TimeDelta max_duration; | 631 base::TimeDelta max_duration; |
674 for (size_t i = 0; i < format_context->nb_streams; ++i) { | 632 for (size_t i = 0; i < format_context->nb_streams; ++i) { |
675 AVStream* stream = format_context->streams[i]; | 633 AVStream* stream = format_context->streams[i]; |
676 const AVCodecContext* codec_context = stream->codec; | 634 AVCodecContext* codec_context = stream->codec; |
677 const AVMediaType codec_type = codec_context->codec_type; | 635 AVMediaType codec_type = codec_context->codec_type; |
678 bool discard_negative_timestamps = false; | |
679 | 636 |
680 if (codec_type == AVMEDIA_TYPE_AUDIO) { | 637 if (codec_type == AVMEDIA_TYPE_AUDIO) { |
681 if (audio_stream) | 638 if (audio_stream) |
682 continue; | 639 continue; |
683 | 640 |
684 // Log the codec detected, whether it is supported or not. | 641 // Log the codec detected, whether it is supported or not. |
685 UMA_HISTOGRAM_SPARSE_SLOWLY("Media.DetectedAudioCodec", | 642 UMA_HISTOGRAM_SPARSE_SLOWLY("Media.DetectedAudioCodec", |
686 codec_context->codec_id); | 643 codec_context->codec_id); |
687 // Ensure the codec is supported. IsValidConfig() also checks that the | 644 // Ensure the codec is supported. IsValidConfig() also checks that the |
688 // channel layout and sample format are valid. | 645 // channel layout and sample format are valid. |
689 AVStreamToAudioDecoderConfig(stream, &audio_config, false); | 646 AVStreamToAudioDecoderConfig(stream, &audio_config, false); |
690 if (!audio_config.IsValidConfig()) | 647 if (!audio_config.IsValidConfig()) |
691 continue; | 648 continue; |
692 audio_stream = stream; | 649 audio_stream = stream; |
693 | |
694 // Enable post-decode frame dropping for packets with negative timestamps | |
695 // as outlined in section A.2 in the Ogg Vorbis spec: | |
696 // http://xiph.org/vorbis/doc/Vorbis_I_spec.html | |
697 discard_negative_timestamps = | |
698 audio_config.codec() == kCodecVorbis && | |
699 strcmp(glue_->format_context()->iformat->name, "ogg") == 0; | |
700 } else if (codec_type == AVMEDIA_TYPE_VIDEO) { | 650 } else if (codec_type == AVMEDIA_TYPE_VIDEO) { |
701 if (video_stream) | 651 if (video_stream) |
702 continue; | 652 continue; |
703 | 653 |
704 // Log the codec detected, whether it is supported or not. | 654 // Log the codec detected, whether it is supported or not. |
705 UMA_HISTOGRAM_SPARSE_SLOWLY("Media.DetectedVideoCodec", | 655 UMA_HISTOGRAM_SPARSE_SLOWLY("Media.DetectedVideoCodec", |
706 codec_context->codec_id); | 656 codec_context->codec_id); |
707 // Ensure the codec is supported. IsValidConfig() also checks that the | 657 // Ensure the codec is supported. IsValidConfig() also checks that the |
708 // frame size and visible size are valid. | 658 // frame size and visible size are valid. |
709 AVStreamToVideoDecoderConfig(stream, &video_config, false); | 659 AVStreamToVideoDecoderConfig(stream, &video_config, false); |
710 | 660 |
711 if (!video_config.IsValidConfig()) | 661 if (!video_config.IsValidConfig()) |
712 continue; | 662 continue; |
713 video_stream = stream; | 663 video_stream = stream; |
714 } else if (codec_type == AVMEDIA_TYPE_SUBTITLE) { | 664 } else if (codec_type == AVMEDIA_TYPE_SUBTITLE) { |
715 if (codec_context->codec_id != AV_CODEC_ID_WEBVTT || !text_enabled_) { | 665 if (codec_context->codec_id != AV_CODEC_ID_WEBVTT || !text_enabled_) { |
716 continue; | 666 continue; |
717 } | 667 } |
718 } else { | 668 } else { |
719 continue; | 669 continue; |
720 } | 670 } |
721 | 671 |
722 streams_[i] = | 672 streams_[i] = new FFmpegDemuxerStream(this, stream); |
723 new FFmpegDemuxerStream(this, stream, discard_negative_timestamps); | |
724 max_duration = std::max(max_duration, streams_[i]->duration()); | 673 max_duration = std::max(max_duration, streams_[i]->duration()); |
725 | 674 |
726 const base::TimeDelta start_time = ExtractStartTime(stream); | 675 if (stream->first_dts != static_cast<int64_t>(AV_NOPTS_VALUE)) { |
727 if (start_time == kNoTimestamp()) | 676 const base::TimeDelta first_dts = ConvertFromTimeBase( |
728 continue; | 677 stream->time_base, stream->first_dts); |
729 | 678 if (start_time_ == kNoTimestamp() || first_dts < start_time_) |
730 // Find the lowest stream start time. Prefer the video stream for seeking | 679 start_time_ = first_dts; |
731 // in the event of matching stream start times. | |
732 if (start_time < start_time_ || | |
733 (codec_type == AVMEDIA_TYPE_VIDEO && start_time <= start_time_)) { | |
734 stream_index_for_seeking_ = i; | |
735 start_time_ = start_time; | |
736 } | 680 } |
737 } | 681 } |
738 | 682 |
739 if (!audio_stream && !video_stream) { | 683 if (!audio_stream && !video_stream) { |
740 status_cb.Run(DEMUXER_ERROR_NO_SUPPORTED_STREAMS); | 684 status_cb.Run(DEMUXER_ERROR_NO_SUPPORTED_STREAMS); |
741 return; | 685 return; |
742 } | 686 } |
743 | 687 |
744 if (text_enabled_) | 688 if (text_enabled_) |
745 AddTextStreams(); | 689 AddTextStreams(); |
746 | 690 |
747 if (format_context->duration != static_cast<int64_t>(AV_NOPTS_VALUE)) { | 691 if (format_context->duration != static_cast<int64_t>(AV_NOPTS_VALUE)) { |
748 // If there is a duration value in the container use that to find the | 692 // If there is a duration value in the container use that to find the |
749 // maximum between it and the duration from A/V streams. | 693 // maximum between it and the duration from A/V streams. |
750 const AVRational av_time_base = {1, AV_TIME_BASE}; | 694 const AVRational av_time_base = {1, AV_TIME_BASE}; |
751 max_duration = | 695 max_duration = |
752 std::max(max_duration, | 696 std::max(max_duration, |
753 ConvertFromTimeBase(av_time_base, format_context->duration)); | 697 ConvertFromTimeBase(av_time_base, format_context->duration)); |
754 } else { | 698 } else { |
755 // The duration is unknown, in which case this is likely a live stream. | 699 // The duration is unknown, in which case this is likely a live stream. |
756 max_duration = kInfiniteDuration(); | 700 max_duration = kInfiniteDuration(); |
757 } | 701 } |
758 | 702 |
759 // If no start time could be determined, default to zero and prefer the video | 703 // Some demuxers, like WAV, do not put timestamps on their frames. We |
760 // stream over the audio stream for seeking. E.g., The WAV demuxer does not | 704 // assume the start time is 0. |
761 // put timestamps on its frames. | 705 if (start_time_ == kNoTimestamp()) |
762 if (start_time_ == kInfiniteDuration()) { | |
763 start_time_ = base::TimeDelta(); | 706 start_time_ = base::TimeDelta(); |
764 stream_index_for_seeking_ = | |
765 video_stream ? video_stream->index : audio_stream->index; | |
766 } | |
767 | 707 |
768 // MPEG-4 B-frames cause grief for a simple container like AVI. Enable PTS | 708 // MPEG-4 B-frames cause grief for a simple container like AVI. Enable PTS |
769 // generation so we always get timestamps, see http://crbug.com/169570 | 709 // generation so we always get timestamps, see http://crbug.com/169570 |
770 if (strcmp(format_context->iformat->name, "avi") == 0) | 710 if (strcmp(format_context->iformat->name, "avi") == 0) |
771 format_context->flags |= AVFMT_FLAG_GENPTS; | 711 format_context->flags |= AVFMT_FLAG_GENPTS; |
772 | 712 |
773 timeline_offset_ = ExtractTimelineOffset(format_context); | 713 timeline_offset_ = ExtractTimelineOffset(format_context); |
774 | 714 |
775 // Since we're shifting the externally visible start time to zero, we need to | |
776 // adjust the timeline offset to compensate. | |
777 if (!timeline_offset_.is_null()) | |
778 timeline_offset_ += start_time_; | |
779 | |
780 if (max_duration == kInfiniteDuration() && !timeline_offset_.is_null()) { | 715 if (max_duration == kInfiniteDuration() && !timeline_offset_.is_null()) { |
781 liveness_ = LIVENESS_LIVE; | 716 liveness_ = LIVENESS_LIVE; |
782 } else if (max_duration != kInfiniteDuration()) { | 717 } else if (max_duration != kInfiniteDuration()) { |
783 liveness_ = LIVENESS_RECORDED; | 718 liveness_ = LIVENESS_RECORDED; |
784 } else { | 719 } else { |
785 liveness_ = LIVENESS_UNKNOWN; | 720 liveness_ = LIVENESS_UNKNOWN; |
786 } | 721 } |
787 | 722 |
788 // Good to go: set the duration and bitrate and notify we're done | 723 // Good to go: set the duration and bitrate and notify we're done |
789 // initializing. | 724 // initializing. |
(...skipping 51 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
841 video_codec->time_base.num, | 776 video_codec->time_base.num, |
842 video_codec->time_base.den)); | 777 video_codec->time_base.den)); |
843 media_log_->SetStringProperty( | 778 media_log_->SetStringProperty( |
844 "video_format", VideoFrame::FormatToString(video_config.format())); | 779 "video_format", VideoFrame::FormatToString(video_config.format())); |
845 media_log_->SetBooleanProperty("video_is_encrypted", | 780 media_log_->SetBooleanProperty("video_is_encrypted", |
846 video_config.is_encrypted()); | 781 video_config.is_encrypted()); |
847 } else { | 782 } else { |
848 media_log_->SetBooleanProperty("found_video_stream", false); | 783 media_log_->SetBooleanProperty("found_video_stream", false); |
849 } | 784 } |
850 | 785 |
| 786 |
851 media_log_->SetTimeProperty("max_duration", max_duration); | 787 media_log_->SetTimeProperty("max_duration", max_duration); |
852 media_log_->SetTimeProperty("start_time", start_time_); | 788 media_log_->SetTimeProperty("start_time", start_time_); |
853 media_log_->SetIntegerProperty("bitrate", bitrate_); | 789 media_log_->SetIntegerProperty("bitrate", bitrate_); |
854 | 790 |
855 status_cb.Run(PIPELINE_OK); | 791 status_cb.Run(PIPELINE_OK); |
856 } | 792 } |
857 | 793 |
858 void FFmpegDemuxer::OnSeekFrameDone(const PipelineStatusCB& cb, int result) { | 794 void FFmpegDemuxer::OnSeekFrameDone(const PipelineStatusCB& cb, int result) { |
859 DCHECK(task_runner_->BelongsToCurrentThread()); | 795 DCHECK(task_runner_->BelongsToCurrentThread()); |
860 CHECK(pending_seek_); | 796 CHECK(pending_seek_); |
(...skipping 215 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1076 } | 1012 } |
1077 for (size_t i = 0; i < buffered.size(); ++i) | 1013 for (size_t i = 0; i < buffered.size(); ++i) |
1078 host_->AddBufferedTimeRange(buffered.start(i), buffered.end(i)); | 1014 host_->AddBufferedTimeRange(buffered.start(i), buffered.end(i)); |
1079 } | 1015 } |
1080 | 1016 |
1081 void FFmpegDemuxer::OnDataSourceError() { | 1017 void FFmpegDemuxer::OnDataSourceError() { |
1082 host_->OnDemuxerError(PIPELINE_ERROR_READ); | 1018 host_->OnDemuxerError(PIPELINE_ERROR_READ); |
1083 } | 1019 } |
1084 | 1020 |
1085 } // namespace media | 1021 } // namespace media |
OLD | NEW |