OLD | NEW |
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "media/filters/ffmpeg_demuxer.h" | 5 #include "media/filters/ffmpeg_demuxer.h" |
6 | 6 |
7 #include <algorithm> | 7 #include <algorithm> |
8 #include <string> | 8 #include <string> |
9 | 9 |
10 #include "base/base64.h" | 10 #include "base/base64.h" |
11 #include "base/bind.h" | 11 #include "base/bind.h" |
12 #include "base/callback.h" | 12 #include "base/callback.h" |
13 #include "base/callback_helpers.h" | 13 #include "base/callback_helpers.h" |
14 #include "base/command_line.h" | |
15 #include "base/memory/scoped_ptr.h" | 14 #include "base/memory/scoped_ptr.h" |
16 #include "base/message_loop/message_loop.h" | 15 #include "base/message_loop/message_loop.h" |
17 #include "base/metrics/sparse_histogram.h" | 16 #include "base/metrics/sparse_histogram.h" |
18 #include "base/stl_util.h" | |
19 #include "base/strings/string_util.h" | 17 #include "base/strings/string_util.h" |
20 #include "base/strings/stringprintf.h" | 18 #include "base/strings/stringprintf.h" |
21 #include "base/sys_byteorder.h" | 19 #include "base/sys_byteorder.h" |
22 #include "base/task_runner_util.h" | 20 #include "base/task_runner_util.h" |
23 #include "base/time/time.h" | 21 #include "base/time/time.h" |
24 #include "media/base/audio_decoder_config.h" | 22 #include "media/base/audio_decoder_config.h" |
25 #include "media/base/bind_to_loop.h" | 23 #include "media/base/bind_to_loop.h" |
26 #include "media/base/decoder_buffer.h" | 24 #include "media/base/decoder_buffer.h" |
27 #include "media/base/decrypt_config.h" | 25 #include "media/base/decrypt_config.h" |
28 #include "media/base/limits.h" | 26 #include "media/base/limits.h" |
29 #include "media/base/media_log.h" | 27 #include "media/base/media_log.h" |
30 #include "media/base/media_switches.h" | |
31 #include "media/base/video_decoder_config.h" | 28 #include "media/base/video_decoder_config.h" |
32 #include "media/ffmpeg/ffmpeg_common.h" | 29 #include "media/ffmpeg/ffmpeg_common.h" |
33 #include "media/filters/ffmpeg_glue.h" | 30 #include "media/filters/ffmpeg_glue.h" |
34 #include "media/filters/ffmpeg_h264_to_annex_b_bitstream_converter.h" | 31 #include "media/filters/ffmpeg_h264_to_annex_b_bitstream_converter.h" |
| 32 #include "media/filters/webvtt_util.h" |
35 #include "media/webm/webm_crypto_helpers.h" | 33 #include "media/webm/webm_crypto_helpers.h" |
36 | 34 |
37 namespace media { | 35 namespace media { |
38 | 36 |
39 // | 37 // |
40 // FFmpegDemuxerStream | 38 // FFmpegDemuxerStream |
41 // | 39 // |
42 FFmpegDemuxerStream::FFmpegDemuxerStream( | 40 FFmpegDemuxerStream::FFmpegDemuxerStream( |
43 FFmpegDemuxer* demuxer, | 41 FFmpegDemuxer* demuxer, |
44 AVStream* stream) | 42 AVStream* stream) |
(...skipping 13 matching lines...) |
58 case AVMEDIA_TYPE_AUDIO: | 56 case AVMEDIA_TYPE_AUDIO: |
59 type_ = AUDIO; | 57 type_ = AUDIO; |
60 AVStreamToAudioDecoderConfig(stream, &audio_config_, true); | 58 AVStreamToAudioDecoderConfig(stream, &audio_config_, true); |
61 is_encrypted = audio_config_.is_encrypted(); | 59 is_encrypted = audio_config_.is_encrypted(); |
62 break; | 60 break; |
63 case AVMEDIA_TYPE_VIDEO: | 61 case AVMEDIA_TYPE_VIDEO: |
64 type_ = VIDEO; | 62 type_ = VIDEO; |
65 AVStreamToVideoDecoderConfig(stream, &video_config_, true); | 63 AVStreamToVideoDecoderConfig(stream, &video_config_, true); |
66 is_encrypted = video_config_.is_encrypted(); | 64 is_encrypted = video_config_.is_encrypted(); |
67 break; | 65 break; |
| 66 case AVMEDIA_TYPE_SUBTITLE: |
| 67 type_ = TEXT; |
| 68 break; |
68 default: | 69 default: |
69 NOTREACHED(); | 70 NOTREACHED(); |
70 break; | 71 break; |
71 } | 72 } |
72 | 73 |
73 // Calculate the duration. | 74 // Calculate the duration. |
74 duration_ = ConvertStreamTimestamp(stream->time_base, stream->duration); | 75 duration_ = ConvertStreamTimestamp(stream->time_base, stream->duration); |
75 | 76 |
76 if (stream_->codec->codec_id == AV_CODEC_ID_H264) { | 77 if (stream_->codec->codec_id == AV_CODEC_ID_H264) { |
77 bitstream_converter_.reset( | 78 bitstream_converter_.reset( |
(...skipping 30 matching lines...) |
108 // Convert the packet if there is a bitstream filter. | 109 // Convert the packet if there is a bitstream filter. |
109 if (packet->data && bitstream_converter_enabled_ && | 110 if (packet->data && bitstream_converter_enabled_ && |
110 !bitstream_converter_->ConvertPacket(packet.get())) { | 111 !bitstream_converter_->ConvertPacket(packet.get())) { |
111 LOG(ERROR) << "Format conversion failed."; | 112 LOG(ERROR) << "Format conversion failed."; |
112 } | 113 } |
113 | 114 |
114 // Get side data if any. For now, the only type of side_data is VP8 Alpha. We | 115 // Get side data if any. For now, the only type of side_data is VP8 Alpha. We |
115 // keep this generic so that other side_data types in the future can be | 116 // keep this generic so that other side_data types in the future can be |
116 // handled the same way as well. | 117 // handled the same way as well. |
117 av_packet_split_side_data(packet.get()); | 118 av_packet_split_side_data(packet.get()); |
118 int side_data_size = 0; | 119 scoped_refptr<DecoderBuffer> buffer; |
119 uint8* side_data = av_packet_get_side_data( | |
120 packet.get(), | |
121 AV_PKT_DATA_MATROSKA_BLOCKADDITIONAL, | |
122 &side_data_size); | |
123 | 120 |
124 // If a packet is returned by FFmpeg's av_parser_parse2() the packet will | 121 if (type() == DemuxerStream::TEXT) { |
125 // reference inner memory of FFmpeg. As such we should transfer the packet | 122 int id_size = 0; |
126 // into memory we control. | 123 uint8* id_data = av_packet_get_side_data( |
127 scoped_refptr<DecoderBuffer> buffer; | 124 packet.get(), |
128 if (side_data_size > 0) { | 125 AV_PKT_DATA_WEBVTT_IDENTIFIER, |
| 126 &id_size); |
| 127 |
| 128 int settings_size = 0; |
| 129 uint8* settings_data = av_packet_get_side_data( |
| 130 packet.get(), |
| 131 AV_PKT_DATA_WEBVTT_SETTINGS, |
| 132 &settings_size); |
| 133 |
| 134 std::vector<uint8> side_data; |
| 135 MakeSideData(id_data, id_data + id_size, |
| 136 settings_data, settings_data + settings_size, |
| 137 &side_data); |
| 138 |
129 buffer = DecoderBuffer::CopyFrom(packet.get()->data, packet.get()->size, | 139 buffer = DecoderBuffer::CopyFrom(packet.get()->data, packet.get()->size, |
130 side_data, side_data_size); | 140 side_data.data(), side_data.size()); |
131 } else { | 141 } else { |
132 buffer = DecoderBuffer::CopyFrom(packet.get()->data, packet.get()->size); | 142 int side_data_size = 0; |
133 } | 143 uint8* side_data = av_packet_get_side_data( |
| 144 packet.get(), |
| 145 AV_PKT_DATA_MATROSKA_BLOCKADDITIONAL, |
| 146 &side_data_size); |
134 | 147 |
135 int skip_samples_size = 0; | 148 // If a packet is returned by FFmpeg's av_parser_parse2() the packet will |
136 uint8* skip_samples = av_packet_get_side_data(packet.get(), | 149 // reference inner memory of FFmpeg. As such we should transfer the packet |
137 AV_PKT_DATA_SKIP_SAMPLES, | 150 // into memory we control. |
138 &skip_samples_size); | 151 if (side_data_size > 0) { |
139 const int kSkipSamplesValidSize = 10; | 152 buffer = DecoderBuffer::CopyFrom(packet.get()->data, packet.get()->size, |
140 const int kSkipSamplesOffset = 4; | 153 side_data, side_data_size); |
141 if (skip_samples_size >= kSkipSamplesValidSize) { | 154 } else { |
142 int discard_padding_samples = base::ByteSwapToLE32( | 155 buffer = DecoderBuffer::CopyFrom(packet.get()->data, packet.get()->size); |
143 *(reinterpret_cast<const uint32*>(skip_samples + | 156 } |
144 kSkipSamplesOffset))); | 157 |
145 // TODO(vigneshv): Change decoder buffer to use number of samples so that | 158 int skip_samples_size = 0; |
146 // this conversion can be avoided. | 159 uint8* skip_samples = av_packet_get_side_data(packet.get(), |
147 buffer->set_discard_padding(base::TimeDelta::FromMicroseconds( | 160 AV_PKT_DATA_SKIP_SAMPLES, |
148 discard_padding_samples * 1000000.0 / | 161 &skip_samples_size); |
149 audio_decoder_config().samples_per_second())); | 162 const int kSkipSamplesValidSize = 10; |
| 163 const int kSkipSamplesOffset = 4; |
| 164 if (skip_samples_size >= kSkipSamplesValidSize) { |
| 165 int discard_padding_samples = base::ByteSwapToLE32( |
| 166 *(reinterpret_cast<const uint32*>(skip_samples + |
| 167 kSkipSamplesOffset))); |
| 168 // TODO(vigneshv): Change decoder buffer to use number of samples so that |
| 169 // this conversion can be avoided. |
| 170 buffer->set_discard_padding(base::TimeDelta::FromMicroseconds( |
| 171 discard_padding_samples * 1000000.0 / |
| 172 audio_decoder_config().samples_per_second())); |
| 173 } |
150 } | 174 } |
151 | 175 |
152 if ((type() == DemuxerStream::AUDIO && audio_config_.is_encrypted()) || | 176 if ((type() == DemuxerStream::AUDIO && audio_config_.is_encrypted()) || |
153 (type() == DemuxerStream::VIDEO && video_config_.is_encrypted())) { | 177 (type() == DemuxerStream::VIDEO && video_config_.is_encrypted())) { |
154 scoped_ptr<DecryptConfig> config(WebMCreateDecryptConfig( | 178 scoped_ptr<DecryptConfig> config(WebMCreateDecryptConfig( |
155 packet->data, packet->size, | 179 packet->data, packet->size, |
156 reinterpret_cast<const uint8*>(encryption_key_id_.data()), | 180 reinterpret_cast<const uint8*>(encryption_key_id_.data()), |
157 encryption_key_id_.size())); | 181 encryption_key_id_.size())); |
158 if (!config) | 182 if (!config) |
159 LOG(ERROR) << "Creation of DecryptConfig failed."; | 183 LOG(ERROR) << "Creation of DecryptConfig failed."; |
160 buffer->set_decrypt_config(config.Pass()); | 184 buffer->set_decrypt_config(config.Pass()); |
161 } | 185 } |
162 | 186 |
163 buffer->set_timestamp(ConvertStreamTimestamp( | 187 buffer->set_timestamp(ConvertStreamTimestamp( |
164 stream_->time_base, packet->pts)); | 188 stream_->time_base, packet->pts)); |
165 buffer->set_duration(ConvertStreamTimestamp( | 189 buffer->set_duration(ConvertStreamTimestamp( |
(...skipping 117 matching lines...) |
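Note on the TEXT branch in the hunk above: the WebVTT cue identifier and settings are folded into the single side-data buffer that DecoderBuffer::CopyFrom() accepts, via MakeSideData() from media/filters/webvtt_util.h. A minimal sketch of what such a helper could look like follows; the NUL-separator layout is an assumption for illustration, not something this diff confirms.

    // Sketch only (assumed layout; requires <vector> and Chromium's uint8):
    // pack the cue id and settings into one buffer, NUL-terminating each so
    // a downstream consumer can split them apart again.
    template <typename InputIt>
    void MakeSideData(InputIt id_begin, InputIt id_end,
                      InputIt settings_begin, InputIt settings_end,
                      std::vector<uint8>* side_data) {
      side_data->clear();
      side_data->insert(side_data->end(), id_begin, id_end);
      side_data->push_back(0);  // End of cue identifier.
      side_data->insert(side_data->end(), settings_begin, settings_end);
      side_data->push_back(0);  // End of cue settings.
    }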
283 // TODO(scherkus): Remove early return and reenable time-based capacity | 307 // TODO(scherkus): Remove early return and reenable time-based capacity |
284 // after our data sources support canceling/concurrent reads, see | 308 // after our data sources support canceling/concurrent reads, see |
285 // http://crbug.com/165762 for details. | 309 // http://crbug.com/165762 for details. |
286 return !read_cb_.is_null(); | 310 return !read_cb_.is_null(); |
287 | 311 |
288 // Try to have one second's worth of encoded data per stream. | 312 // Try to have one second's worth of encoded data per stream. |
289 const base::TimeDelta kCapacity = base::TimeDelta::FromSeconds(1); | 313 const base::TimeDelta kCapacity = base::TimeDelta::FromSeconds(1); |
290 return buffer_queue_.IsEmpty() || buffer_queue_.Duration() < kCapacity; | 314 return buffer_queue_.IsEmpty() || buffer_queue_.Duration() < kCapacity; |
291 } | 315 } |
292 | 316 |
| 317 TextKind FFmpegDemuxerStream::GetTextKind() const { |
| 318 DCHECK_EQ(type_, DemuxerStream::TEXT); |
| 319 |
| 320 if (stream_->disposition & AV_DISPOSITION_CAPTIONS) |
| 321 return kTextCaptions; |
| 322 |
| 323 if (stream_->disposition & AV_DISPOSITION_DESCRIPTIONS) |
| 324 return kTextDescriptions; |
| 325 |
| 326 if (stream_->disposition & AV_DISPOSITION_METADATA) |
| 327 return kTextMetadata; |
| 328 |
| 329 return kTextSubtitles; |
| 330 } |
| 331 |
| 332 std::string FFmpegDemuxerStream::GetMetadata(const char* key) const { |
| 333 const AVDictionaryEntry* entry = |
| 334 av_dict_get(stream_->metadata, key, NULL, 0); |
| 335 return (entry == NULL || entry->value == NULL) ? "" : entry->value; |
| 336 } |
| 337 |
293 // static | 338 // static |
294 base::TimeDelta FFmpegDemuxerStream::ConvertStreamTimestamp( | 339 base::TimeDelta FFmpegDemuxerStream::ConvertStreamTimestamp( |
295 const AVRational& time_base, int64 timestamp) { | 340 const AVRational& time_base, int64 timestamp) { |
296 if (timestamp == static_cast<int64>(AV_NOPTS_VALUE)) | 341 if (timestamp == static_cast<int64>(AV_NOPTS_VALUE)) |
297 return kNoTimestamp(); | 342 return kNoTimestamp(); |
298 | 343 |
299 return ConvertFromTimeBase(time_base, timestamp); | 344 return ConvertFromTimeBase(time_base, timestamp); |
300 } | 345 } |
301 | 346 |
302 // | 347 // |
303 // FFmpegDemuxer | 348 // FFmpegDemuxer |
304 // | 349 // |
305 FFmpegDemuxer::FFmpegDemuxer( | 350 FFmpegDemuxer::FFmpegDemuxer( |
306 const scoped_refptr<base::MessageLoopProxy>& message_loop, | 351 const scoped_refptr<base::MessageLoopProxy>& message_loop, |
307 DataSource* data_source, | 352 DataSource* data_source, |
308 const NeedKeyCB& need_key_cb, | 353 const NeedKeyCB& need_key_cb, |
309 const scoped_refptr<MediaLog>& media_log) | 354 const scoped_refptr<MediaLog>& media_log) |
310 : host_(NULL), | 355 : host_(NULL), |
311 message_loop_(message_loop), | 356 message_loop_(message_loop), |
312 weak_factory_(this), | 357 weak_factory_(this), |
313 blocking_thread_("FFmpegDemuxer"), | 358 blocking_thread_("FFmpegDemuxer"), |
314 pending_read_(false), | 359 pending_read_(false), |
315 pending_seek_(false), | 360 pending_seek_(false), |
316 data_source_(data_source), | 361 data_source_(data_source), |
317 media_log_(media_log), | 362 media_log_(media_log), |
318 bitrate_(0), | 363 bitrate_(0), |
319 start_time_(kNoTimestamp()), | 364 start_time_(kNoTimestamp()), |
320 audio_disabled_(false), | 365 audio_disabled_(false), |
| 366 text_enabled_(false), |
321 duration_known_(false), | 367 duration_known_(false), |
322 url_protocol_(data_source, BindToLoop(message_loop_, base::Bind( | 368 url_protocol_(data_source, BindToLoop(message_loop_, base::Bind( |
323 &FFmpegDemuxer::OnDataSourceError, base::Unretained(this)))), | 369 &FFmpegDemuxer::OnDataSourceError, base::Unretained(this)))), |
324 need_key_cb_(need_key_cb) { | 370 need_key_cb_(need_key_cb) { |
325 DCHECK(message_loop_.get()); | 371 DCHECK(message_loop_.get()); |
326 DCHECK(data_source_); | 372 DCHECK(data_source_); |
327 } | 373 } |
328 | 374 |
329 FFmpegDemuxer::~FFmpegDemuxer() {} | 375 FFmpegDemuxer::~FFmpegDemuxer() {} |
330 | 376 |
(...skipping 37 matching lines...) |
368 audio_disabled_ = true; | 414 audio_disabled_ = true; |
369 StreamVector::iterator iter; | 415 StreamVector::iterator iter; |
370 for (iter = streams_.begin(); iter != streams_.end(); ++iter) { | 416 for (iter = streams_.begin(); iter != streams_.end(); ++iter) { |
371 if (*iter && (*iter)->type() == DemuxerStream::AUDIO) { | 417 if (*iter && (*iter)->type() == DemuxerStream::AUDIO) { |
372 (*iter)->Stop(); | 418 (*iter)->Stop(); |
373 } | 419 } |
374 } | 420 } |
375 } | 421 } |
376 | 422 |
377 void FFmpegDemuxer::Initialize(DemuxerHost* host, | 423 void FFmpegDemuxer::Initialize(DemuxerHost* host, |
378 const PipelineStatusCB& status_cb) { | 424 const PipelineStatusCB& status_cb, |
| 425 bool enable_text_tracks) { |
379 DCHECK(message_loop_->BelongsToCurrentThread()); | 426 DCHECK(message_loop_->BelongsToCurrentThread()); |
380 host_ = host; | 427 host_ = host; |
381 weak_this_ = weak_factory_.GetWeakPtr(); | 428 weak_this_ = weak_factory_.GetWeakPtr(); |
| 429 text_enabled_ = enable_text_tracks; |
382 | 430 |
383 // TODO(scherkus): DataSource should have a host by this point, | 431 // TODO(scherkus): DataSource should have a host by this point, |
384 // see http://crbug.com/122071 | 432 // see http://crbug.com/122071 |
385 data_source_->set_host(host); | 433 data_source_->set_host(host); |
386 | 434 |
387 glue_.reset(new FFmpegGlue(&url_protocol_)); | 435 glue_.reset(new FFmpegGlue(&url_protocol_)); |
388 AVFormatContext* format_context = glue_->format_context(); | 436 AVFormatContext* format_context = glue_->format_context(); |
389 | 437 |
390 // Disable ID3v1 tag reading to avoid costly seeks to end of file for data we | 438 // Disable ID3v1 tag reading to avoid costly seeks to end of file for data we |
391 // don't use. FFmpeg will only read ID3v1 tags if no other metadata is | 439 // don't use. FFmpeg will only read ID3v1 tags if no other metadata is |
(...skipping 23 matching lines...) |
415 } | 463 } |
416 } | 464 } |
417 return NULL; | 465 return NULL; |
418 } | 466 } |
419 | 467 |
420 base::TimeDelta FFmpegDemuxer::GetStartTime() const { | 468 base::TimeDelta FFmpegDemuxer::GetStartTime() const { |
421 DCHECK(message_loop_->BelongsToCurrentThread()); | 469 DCHECK(message_loop_->BelongsToCurrentThread()); |
422 return start_time_; | 470 return start_time_; |
423 } | 471 } |
424 | 472 |
| 473 void FFmpegDemuxer::AddTextStreams() { |
| 474 DCHECK(message_loop_->BelongsToCurrentThread()); |
| 475 |
| 476 for (StreamVector::size_type idx = 0; idx < streams_.size(); ++idx) { |
| 477 FFmpegDemuxerStream* stream = streams_[idx]; |
| 478 if (stream == NULL || stream->type() != DemuxerStream::TEXT) |
| 479 continue; |
| 480 |
| 481 TextKind kind = stream->GetTextKind(); |
| 482 std::string title = stream->GetMetadata("title"); |
| 483 std::string language = stream->GetMetadata("language"); |
| 484 |
| 485 host_->AddTextStream(stream, TextTrackConfig(kind, title, language)); |
| 486 } |
| 487 } |
| 488 |
425 // Helper for calculating the bitrate of the media based on information stored | 489 // Helper for calculating the bitrate of the media based on information stored |
426 // in |format_context| or failing that the size and duration of the media. | 490 // in |format_context| or failing that the size and duration of the media. |
427 // | 491 // |
428 // Returns 0 if a bitrate could not be determined. | 492 // Returns 0 if a bitrate could not be determined. |
429 static int CalculateBitrate( | 493 static int CalculateBitrate( |
430 AVFormatContext* format_context, | 494 AVFormatContext* format_context, |
431 const base::TimeDelta& duration, | 495 const base::TimeDelta& duration, |
432 int64 filesize_in_bytes) { | 496 int64 filesize_in_bytes) { |
433 // If there is a bitrate set on the container, use it. | 497 // If there is a bitrate set on the container, use it. |
434 if (format_context->bit_rate > 0) | 498 if (format_context->bit_rate > 0) |
(...skipping 98 matching lines...) |
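The body of CalculateBitrate() is collapsed above; per its comment, when neither the container nor the streams report a bitrate, the value is approximated from file size and duration. A hedged sketch of that fallback is below; the helper name and exact guards are illustrative, not taken from the diff.

    // Sketch only: approximate bits per second from size and duration.
    // Returns 0 when no sensible estimate is possible.
    static int EstimateBitrateFromSize(const base::TimeDelta& duration,
                                       int64 filesize_in_bytes) {
      if (duration.InMicroseconds() <= 0 ||
          duration == kInfiniteDuration() ||
          filesize_in_bytes == 0) {
        return 0;
      }
      // Use floating point so a very large file does not overflow int64 math.
      double bytes = filesize_in_bytes;
      double duration_us = duration.InMicroseconds();
      return bytes * 8000000.0 / duration_us;  // 8 bits/byte * 1e6 us/s.
    }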
533 // Log the codec detected, whether it is supported or not. | 597 // Log the codec detected, whether it is supported or not. |
534 UMA_HISTOGRAM_SPARSE_SLOWLY("Media.DetectedVideoCodec", | 598 UMA_HISTOGRAM_SPARSE_SLOWLY("Media.DetectedVideoCodec", |
535 codec_context->codec_id); | 599 codec_context->codec_id); |
536 // Ensure the codec is supported. IsValidConfig() also checks that the | 600 // Ensure the codec is supported. IsValidConfig() also checks that the |
537 // frame size and visible size are valid. | 601 // frame size and visible size are valid. |
538 AVStreamToVideoDecoderConfig(stream, &video_config, false); | 602 AVStreamToVideoDecoderConfig(stream, &video_config, false); |
539 | 603 |
540 if (!video_config.IsValidConfig()) | 604 if (!video_config.IsValidConfig()) |
541 continue; | 605 continue; |
542 video_stream = stream; | 606 video_stream = stream; |
| 607 } else if (codec_type == AVMEDIA_TYPE_SUBTITLE) { |
| 608 if (codec_context->codec_id != AV_CODEC_ID_WEBVTT || !text_enabled_) { |
| 609 continue; |
| 610 } |
543 } else { | 611 } else { |
544 continue; | 612 continue; |
545 } | 613 } |
546 | 614 |
547 streams_[i] = new FFmpegDemuxerStream(this, stream); | 615 streams_[i] = new FFmpegDemuxerStream(this, stream); |
548 max_duration = std::max(max_duration, streams_[i]->duration()); | 616 max_duration = std::max(max_duration, streams_[i]->duration()); |
549 | 617 |
550 if (stream->first_dts != static_cast<int64_t>(AV_NOPTS_VALUE)) { | 618 if (stream->first_dts != static_cast<int64_t>(AV_NOPTS_VALUE)) { |
551 const base::TimeDelta first_dts = ConvertFromTimeBase( | 619 const base::TimeDelta first_dts = ConvertFromTimeBase( |
552 stream->time_base, stream->first_dts); | 620 stream->time_base, stream->first_dts); |
553 if (start_time_ == kNoTimestamp() || first_dts < start_time_) | 621 if (start_time_ == kNoTimestamp() || first_dts < start_time_) |
554 start_time_ = first_dts; | 622 start_time_ = first_dts; |
555 } | 623 } |
556 } | 624 } |
557 | 625 |
558 if (!audio_stream && !video_stream) { | 626 if (!audio_stream && !video_stream) { |
559 status_cb.Run(DEMUXER_ERROR_NO_SUPPORTED_STREAMS); | 627 status_cb.Run(DEMUXER_ERROR_NO_SUPPORTED_STREAMS); |
560 return; | 628 return; |
561 } | 629 } |
562 | 630 |
| 631 if (text_enabled_) |
| 632 AddTextStreams(); |
| 633 |
563 if (format_context->duration != static_cast<int64_t>(AV_NOPTS_VALUE)) { | 634 if (format_context->duration != static_cast<int64_t>(AV_NOPTS_VALUE)) { |
564 // If there is a duration value in the container use that to find the | 635 // If there is a duration value in the container use that to find the |
565 // maximum between it and the duration from A/V streams. | 636 // maximum between it and the duration from A/V streams. |
566 const AVRational av_time_base = {1, AV_TIME_BASE}; | 637 const AVRational av_time_base = {1, AV_TIME_BASE}; |
567 max_duration = | 638 max_duration = |
568 std::max(max_duration, | 639 std::max(max_duration, |
569 ConvertFromTimeBase(av_time_base, format_context->duration)); | 640 ConvertFromTimeBase(av_time_base, format_context->duration)); |
570 } else { | 641 } else { |
571 // The duration is unknown, in which case this is likely a live stream. | 642 // The duration is unknown, in which case this is likely a live stream. |
572 max_duration = kInfiniteDuration(); | 643 max_duration = kInfiniteDuration(); |
(...skipping 274 matching lines...) |
847 } | 918 } |
848 for (size_t i = 0; i < buffered.size(); ++i) | 919 for (size_t i = 0; i < buffered.size(); ++i) |
849 host_->AddBufferedTimeRange(buffered.start(i), buffered.end(i)); | 920 host_->AddBufferedTimeRange(buffered.start(i), buffered.end(i)); |
850 } | 921 } |
851 | 922 |
852 void FFmpegDemuxer::OnDataSourceError() { | 923 void FFmpegDemuxer::OnDataSourceError() { |
853 host_->OnDemuxerError(PIPELINE_ERROR_READ); | 924 host_->OnDemuxerError(PIPELINE_ERROR_READ); |
854 } | 925 } |
855 | 926 |
856 } // namespace media | 927 } // namespace media |