Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "media/filters/ffmpeg_demuxer.h" | 5 #include "media/filters/ffmpeg_demuxer.h" |
| 6 | 6 |
| 7 #include <algorithm> | 7 #include <algorithm> |
| 8 #include <string> | 8 #include <string> |
| 9 | 9 |
| 10 #include "base/base64.h" | 10 #include "base/base64.h" |
| 11 #include "base/bind.h" | 11 #include "base/bind.h" |
| 12 #include "base/callback.h" | 12 #include "base/callback.h" |
| 13 #include "base/callback_helpers.h" | 13 #include "base/callback_helpers.h" |
| 14 #include "base/command_line.h" | |
| 15 #include "base/memory/scoped_ptr.h" | 14 #include "base/memory/scoped_ptr.h" |
| 16 #include "base/message_loop/message_loop.h" | 15 #include "base/message_loop/message_loop.h" |
| 17 #include "base/metrics/sparse_histogram.h" | 16 #include "base/metrics/sparse_histogram.h" |
| 18 #include "base/stl_util.h" | 17 #include "base/stl_util.h" |
| 19 #include "base/strings/string_util.h" | 18 #include "base/strings/string_util.h" |
| 20 #include "base/strings/stringprintf.h" | 19 #include "base/strings/stringprintf.h" |
| 21 #include "base/task_runner_util.h" | 20 #include "base/task_runner_util.h" |
| 22 #include "base/time/time.h" | 21 #include "base/time/time.h" |
| 23 #include "media/base/audio_decoder_config.h" | 22 #include "media/base/audio_decoder_config.h" |
| 24 #include "media/base/bind_to_loop.h" | 23 #include "media/base/bind_to_loop.h" |
| 25 #include "media/base/decoder_buffer.h" | 24 #include "media/base/decoder_buffer.h" |
| 26 #include "media/base/decrypt_config.h" | 25 #include "media/base/decrypt_config.h" |
| 27 #include "media/base/limits.h" | 26 #include "media/base/limits.h" |
| 28 #include "media/base/media_log.h" | 27 #include "media/base/media_log.h" |
| 29 #include "media/base/media_switches.h" | |
| 30 #include "media/base/video_decoder_config.h" | 28 #include "media/base/video_decoder_config.h" |
| 31 #include "media/ffmpeg/ffmpeg_common.h" | 29 #include "media/ffmpeg/ffmpeg_common.h" |
| 32 #include "media/filters/ffmpeg_glue.h" | 30 #include "media/filters/ffmpeg_glue.h" |
| 33 #include "media/filters/ffmpeg_h264_to_annex_b_bitstream_converter.h" | 31 #include "media/filters/ffmpeg_h264_to_annex_b_bitstream_converter.h" |
| 34 #include "media/webm/webm_crypto_helpers.h" | 32 #include "media/webm/webm_crypto_helpers.h" |
| 35 | 33 |
| 36 namespace media { | 34 namespace media { |
| 37 | 35 |
| 38 // | 36 // |
| 39 // FFmpegDemuxerStream | 37 // FFmpegDemuxerStream |
| (...skipping 17 matching lines...) | |
| 57 case AVMEDIA_TYPE_AUDIO: | 55 case AVMEDIA_TYPE_AUDIO: |
| 58 type_ = AUDIO; | 56 type_ = AUDIO; |
| 59 AVStreamToAudioDecoderConfig(stream, &audio_config_, true); | 57 AVStreamToAudioDecoderConfig(stream, &audio_config_, true); |
| 60 is_encrypted = audio_config_.is_encrypted(); | 58 is_encrypted = audio_config_.is_encrypted(); |
| 61 break; | 59 break; |
| 62 case AVMEDIA_TYPE_VIDEO: | 60 case AVMEDIA_TYPE_VIDEO: |
| 63 type_ = VIDEO; | 61 type_ = VIDEO; |
| 64 AVStreamToVideoDecoderConfig(stream, &video_config_, true); | 62 AVStreamToVideoDecoderConfig(stream, &video_config_, true); |
| 65 is_encrypted = video_config_.is_encrypted(); | 63 is_encrypted = video_config_.is_encrypted(); |
| 66 break; | 64 break; |
| 65 case AVMEDIA_TYPE_SUBTITLE: | |
| 66 type_ = TEXT; | |
| 67 break; | |
| 67 default: | 68 default: |
| 68 NOTREACHED(); | 69 NOTREACHED(); |
| 69 break; | 70 break; |
| 70 } | 71 } |
| 71 | 72 |
| 72 // Calculate the duration. | 73 // Calculate the duration. |
| 73 duration_ = ConvertStreamTimestamp(stream->time_base, stream->duration); | 74 duration_ = ConvertStreamTimestamp(stream->time_base, stream->duration); |
| 74 | 75 |
| 75 if (stream_->codec->codec_id == AV_CODEC_ID_H264) { | 76 if (stream_->codec->codec_id == AV_CODEC_ID_H264) { |
| 76 bitstream_converter_.reset( | 77 bitstream_converter_.reset( |
| (...skipping 26 matching lines...) | |
| 103 NOTREACHED() << "Attempted to enqueue packet on a stopped stream"; | 104 NOTREACHED() << "Attempted to enqueue packet on a stopped stream"; |
| 104 return; | 105 return; |
| 105 } | 106 } |
| 106 | 107 |
| 107 // Convert the packet if there is a bitstream filter. | 108 // Convert the packet if there is a bitstream filter. |
| 108 if (packet->data && bitstream_converter_enabled_ && | 109 if (packet->data && bitstream_converter_enabled_ && |
| 109 !bitstream_converter_->ConvertPacket(packet.get())) { | 110 !bitstream_converter_->ConvertPacket(packet.get())) { |
| 110 LOG(ERROR) << "Format conversion failed."; | 111 LOG(ERROR) << "Format conversion failed."; |
| 111 } | 112 } |
| 112 | 113 |
| 113 // Get side data if any. For now, the only type of side_data is VP8 Alpha. We | 114 scoped_refptr<DecoderBuffer> buffer; |
| 114 // keep this generic so that other side_data types in the future can be | 115 |
| 115 // handled the same way as well. | 116 // Get side data if any. For now, the only types of side_data are VP8 Alpha, |
| 117 // and WebVTT id and settings. We keep this generic so that other side_data | |
| 118 // types in the future can be handled the same way as well. | |
| 116 av_packet_split_side_data(packet.get()); | 119 av_packet_split_side_data(packet.get()); |
| 117 int side_data_size = 0; | 120 if (type() == DemuxerStream::TEXT) { |
| 118 uint8* side_data = av_packet_get_side_data( | 121 int id_size = 0; |
| 119 packet.get(), | 122 uint8* id_data = av_packet_get_side_data( |
| 120 AV_PKT_DATA_MATROSKA_BLOCKADDITIONAL, | 123 packet.get(), |
| 121 &side_data_size); | 124 AV_PKT_DATA_WEBVTT_IDENTIFIER, |
| 125 &id_size); | |
| 122 | 126 |
| 123 // If a packet is returned by FFmpeg's av_parser_parse2() the packet will | 127 int settings_size = 0; |
| 124 // reference inner memory of FFmpeg. As such we should transfer the packet | 128 uint8* settings_data = av_packet_get_side_data( |
| 125 // into memory we control. | 129 packet.get(), |
| 126 scoped_refptr<DecoderBuffer> buffer; | 130 AV_PKT_DATA_WEBVTT_SETTINGS, |
| 127 if (side_data_size > 0) { | 131 &settings_size); |
| 132 | |
| 133 // The DecoderBuffer only supports a single side data item. In the case of | |
| 134 // a WebVTT cue, we can have potentially two side data items. In order to | |
| 135 // avoid disrupting DecoderBuffer any more than we need to, we copy both | |
| 136 // side data items onto a single one, and terminate each with a NUL marker. | |
| 137 std::basic_string<uint8> side_data; | |
acolwell (2013/10/08 15:45:24): nit: Use std::vector<uint8> to avoid any issues wit
Matthew Heaney (Chromium) (2013/10/13 05:30:17): Done.
| 138 side_data.append(id_data, id_size); | |
| 139 side_data.append(1, 0); | |
| 140 side_data.append(settings_data, settings_size); | |
| 141 side_data.append(1, 0); | |
| 142 | |
| 128 buffer = DecoderBuffer::CopyFrom(packet.get()->data, packet.get()->size, | 143 buffer = DecoderBuffer::CopyFrom(packet.get()->data, packet.get()->size, |
| 129 side_data, side_data_size); | 144 side_data.data(), side_data.length()); |
| 130 } else { | 145 } else { |
| 131 buffer = DecoderBuffer::CopyFrom(packet.get()->data, packet.get()->size); | 146 int side_data_size = 0; |
| 147 uint8* side_data = av_packet_get_side_data( | |
| 148 packet.get(), | |
| 149 AV_PKT_DATA_MATROSKA_BLOCKADDITIONAL, | |
| 150 &side_data_size); | |
| 151 | |
| 152 // If a packet is returned by FFmpeg's av_parser_parse2() the packet will | |
| 153 // reference inner memory of FFmpeg. As such we should transfer the packet | |
| 154 // into memory we control. | |
| 155 if (side_data_size > 0) { | |
| 156 buffer = DecoderBuffer::CopyFrom(packet.get()->data, packet.get()->size, | |
| 157 side_data, side_data_size); | |
| 158 } else { | |
| 159 buffer = DecoderBuffer::CopyFrom(packet.get()->data, packet.get()->size); | |
| 160 } | |
| 132 } | 161 } |
| 133 | 162 |
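For reference, a minimal sketch of the TEXT branch after the std::vector<uint8> suggestion above is applied. The variable layout mirrors the diff, but the helper name `CopyWebVTTPacket` is made up for illustration; this is a sketch of the reviewer-suggested shape, not the patch as finally landed.

```cpp
#include <vector>

#include "base/basictypes.h"            // uint8 typedef used by Chromium.
#include "media/base/decoder_buffer.h"

namespace media {

// Sketch only: pack the WebVTT cue identifier and cue settings side-data
// items into one contiguous buffer, each terminated by a NUL marker, so
// DecoderBuffer still only has to carry a single side-data blob.
static scoped_refptr<DecoderBuffer> CopyWebVTTPacket(
    const uint8* data, int data_size,
    const uint8* id_data, int id_size,
    const uint8* settings_data, int settings_size) {
  std::vector<uint8> side_data;
  side_data.reserve(id_size + settings_size + 2);
  side_data.insert(side_data.end(), id_data, id_data + id_size);
  side_data.push_back(0);  // NUL marker after the identifier.
  side_data.insert(side_data.end(), settings_data,
                   settings_data + settings_size);
  side_data.push_back(0);  // NUL marker after the settings.
  return DecoderBuffer::CopyFrom(data, data_size,
                                 &side_data[0], side_data.size());
}

}  // namespace media
```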
| 134 if ((type() == DemuxerStream::AUDIO && audio_config_.is_encrypted()) || | 163 if ((type() == DemuxerStream::AUDIO && audio_config_.is_encrypted()) || |
| 135 (type() == DemuxerStream::VIDEO && video_config_.is_encrypted())) { | 164 (type() == DemuxerStream::VIDEO && video_config_.is_encrypted())) { |
| 136 scoped_ptr<DecryptConfig> config(WebMCreateDecryptConfig( | 165 scoped_ptr<DecryptConfig> config(WebMCreateDecryptConfig( |
| 137 packet->data, packet->size, | 166 packet->data, packet->size, |
| 138 reinterpret_cast<const uint8*>(encryption_key_id_.data()), | 167 reinterpret_cast<const uint8*>(encryption_key_id_.data()), |
| 139 encryption_key_id_.size())); | 168 encryption_key_id_.size())); |
| 140 if (!config) | 169 if (!config) |
| 141 LOG(ERROR) << "Creation of DecryptConfig failed."; | 170 LOG(ERROR) << "Creation of DecryptConfig failed."; |
| 142 buffer->set_decrypt_config(config.Pass()); | 171 buffer->set_decrypt_config(config.Pass()); |
| 143 } | 172 } |
| 144 | 173 |
| 145 buffer->set_timestamp(ConvertStreamTimestamp( | 174 buffer->set_timestamp(ConvertStreamTimestamp( |
| 146 stream_->time_base, packet->pts)); | 175 stream_->time_base, packet->pts)); |
| 147 buffer->set_duration(ConvertStreamTimestamp( | 176 buffer->set_duration(ConvertStreamTimestamp( |
| (...skipping 117 matching lines...) | |
| 265 // TODO(scherkus): Remove early return and reenable time-based capacity | 294 // TODO(scherkus): Remove early return and reenable time-based capacity |
| 266 // after our data sources support canceling/concurrent reads, see | 295 // after our data sources support canceling/concurrent reads, see |
| 267 // http://crbug.com/165762 for details. | 296 // http://crbug.com/165762 for details. |
| 268 return !read_cb_.is_null(); | 297 return !read_cb_.is_null(); |
| 269 | 298 |
| 270 // Try to have one second's worth of encoded data per stream. | 299 // Try to have one second's worth of encoded data per stream. |
| 271 const base::TimeDelta kCapacity = base::TimeDelta::FromSeconds(1); | 300 const base::TimeDelta kCapacity = base::TimeDelta::FromSeconds(1); |
| 272 return buffer_queue_.IsEmpty() || buffer_queue_.Duration() < kCapacity; | 301 return buffer_queue_.IsEmpty() || buffer_queue_.Duration() < kCapacity; |
| 273 } | 302 } |
| 274 | 303 |
| 304 TextKind FFmpegDemuxerStream::GetTextKind() const { | |
| 305 if (type_ != DemuxerStream::TEXT) | |
acolwell (2013/10/08 15:45:24): nit: Convert this to a DCHECK since calling this o
Matthew Heaney (Chromium) (2013/10/13 05:30:17): Done.
| 306 return kTextNone; | |
| 307 | |
| 308 TextKind kind; | |
| 309 | |
| 310 if (stream_->disposition & AV_DISPOSITION_CAPTIONS) { | |
| 311 kind = kTextCaptions; | |
acolwell (2013/10/08 15:45:24): nit: Just return here and below & drop the elses.
Matthew Heaney (Chromium) (2013/10/13 05:30:17): Done.
| 312 } else if (stream_->disposition & AV_DISPOSITION_DESCRIPTIONS) { | |
| 313 kind = kTextDescriptions; | |
| 314 } else if (stream_->disposition & AV_DISPOSITION_METADATA) { | |
| 315 kind = kTextMetadata; | |
| 316 } else { | |
| 317 kind = kTextSubtitles; | |
| 318 } | |
| 319 | |
| 320 return kind; | |
| 321 } | |
| 322 | |
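Applying the two nits above (a DCHECK instead of returning kTextNone, and direct returns instead of the local `kind` variable), the accessor would read roughly as follows. This is the reviewer-suggested shape sketched from the diff, not necessarily the exact code that landed:

```cpp
TextKind FFmpegDemuxerStream::GetTextKind() const {
  // Callers are expected to check type() first, so this is a programmer error.
  DCHECK_EQ(type_, DemuxerStream::TEXT);

  if (stream_->disposition & AV_DISPOSITION_CAPTIONS)
    return kTextCaptions;

  if (stream_->disposition & AV_DISPOSITION_DESCRIPTIONS)
    return kTextDescriptions;

  if (stream_->disposition & AV_DISPOSITION_METADATA)
    return kTextMetadata;

  return kTextSubtitles;
}
```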
| 323 std::string FFmpegDemuxerStream::GetMetadata(const char* key) const { | |
| 324 const AVDictionaryEntry* entry = | |
| 325 av_dict_get(stream_->metadata, key, NULL, 0); | |
| 326 return (entry == NULL || entry->value == NULL) ? "" : entry->value; | |
| 327 } | |
| 328 | |
| 275 // static | 329 // static |
| 276 base::TimeDelta FFmpegDemuxerStream::ConvertStreamTimestamp( | 330 base::TimeDelta FFmpegDemuxerStream::ConvertStreamTimestamp( |
| 277 const AVRational& time_base, int64 timestamp) { | 331 const AVRational& time_base, int64 timestamp) { |
| 278 if (timestamp == static_cast<int64>(AV_NOPTS_VALUE)) | 332 if (timestamp == static_cast<int64>(AV_NOPTS_VALUE)) |
| 279 return kNoTimestamp(); | 333 return kNoTimestamp(); |
| 280 | 334 |
| 281 return ConvertFromTimeBase(time_base, timestamp); | 335 return ConvertFromTimeBase(time_base, timestamp); |
| 282 } | 336 } |
| 283 | 337 |
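ConvertFromTimeBase() (from media/ffmpeg/ffmpeg_common.h) performs the actual rescaling from stream time_base units to a base::TimeDelta. As a rough illustration of the arithmetic involved, with a stand-in `Rational` type instead of AVRational and ignoring the rounding and overflow handling the real helper provides:

```cpp
#include <cstdint>

// Illustrative stand-in for AVRational.
struct Rational { int num; int den; };

// Rescale a stream timestamp expressed in |time_base| units to microseconds.
// For example, a WebM packet with time_base {1, 1000} (milliseconds) and a
// pts of 2500 converts to 2,500,000 microseconds, i.e. 2.5 seconds.
int64_t TimestampToMicroseconds(const Rational& time_base, int64_t timestamp) {
  return timestamp * 1000000 * time_base.num / time_base.den;
}
```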
| 284 // | 338 // |
| 285 // FFmpegDemuxer | 339 // FFmpegDemuxer |
| 286 // | 340 // |
| 287 FFmpegDemuxer::FFmpegDemuxer( | 341 FFmpegDemuxer::FFmpegDemuxer( |
| 288 const scoped_refptr<base::MessageLoopProxy>& message_loop, | 342 const scoped_refptr<base::MessageLoopProxy>& message_loop, |
| 289 DataSource* data_source, | 343 DataSource* data_source, |
| 290 const NeedKeyCB& need_key_cb, | 344 const NeedKeyCB& need_key_cb, |
| 345 bool text_enabled, | |
| 291 const scoped_refptr<MediaLog>& media_log) | 346 const scoped_refptr<MediaLog>& media_log) |
| 292 : host_(NULL), | 347 : host_(NULL), |
| 293 message_loop_(message_loop), | 348 message_loop_(message_loop), |
| 294 weak_factory_(this), | 349 weak_factory_(this), |
| 295 blocking_thread_("FFmpegDemuxer"), | 350 blocking_thread_("FFmpegDemuxer"), |
| 296 pending_read_(false), | 351 pending_read_(false), |
| 297 pending_seek_(false), | 352 pending_seek_(false), |
| 298 data_source_(data_source), | 353 data_source_(data_source), |
| 299 media_log_(media_log), | 354 media_log_(media_log), |
| 300 bitrate_(0), | 355 bitrate_(0), |
| 301 start_time_(kNoTimestamp()), | 356 start_time_(kNoTimestamp()), |
| 302 audio_disabled_(false), | 357 audio_disabled_(false), |
| 358 text_enabled_(text_enabled), | |
| 303 duration_known_(false), | 359 duration_known_(false), |
| 304 url_protocol_(data_source, BindToLoop(message_loop_, base::Bind( | 360 url_protocol_(data_source, BindToLoop(message_loop_, base::Bind( |
| 305 &FFmpegDemuxer::OnDataSourceError, base::Unretained(this)))), | 361 &FFmpegDemuxer::OnDataSourceError, base::Unretained(this)))), |
| 306 need_key_cb_(need_key_cb) { | 362 need_key_cb_(need_key_cb) { |
| 307 DCHECK(message_loop_.get()); | 363 DCHECK(message_loop_.get()); |
| 308 DCHECK(data_source_); | 364 DCHECK(data_source_); |
| 309 } | 365 } |
| 310 | 366 |
| 311 FFmpegDemuxer::~FFmpegDemuxer() {} | 367 FFmpegDemuxer::~FFmpegDemuxer() {} |
| 312 | 368 |
| (...skipping 89 matching lines...) | |
| 402 } | 458 } |
| 403 } | 459 } |
| 404 return NULL; | 460 return NULL; |
| 405 } | 461 } |
| 406 | 462 |
| 407 base::TimeDelta FFmpegDemuxer::GetStartTime() const { | 463 base::TimeDelta FFmpegDemuxer::GetStartTime() const { |
| 408 DCHECK(message_loop_->BelongsToCurrentThread()); | 464 DCHECK(message_loop_->BelongsToCurrentThread()); |
| 409 return start_time_; | 465 return start_time_; |
| 410 } | 466 } |
| 411 | 467 |
| 468 void FFmpegDemuxer::AddTextStreams() { | |
| 469 DCHECK(message_loop_->BelongsToCurrentThread()); | |
| 470 | |
| 471 for (StreamVector::size_type idx = 0; idx < streams_.size(); ++idx) { | |
| 472 FFmpegDemuxerStream* stream = streams_[idx]; | |
| 473 if (stream == NULL || stream->type() != DemuxerStream::TEXT) | |
| 474 continue; | |
| 475 | |
| 476 TextKind kind = stream->GetTextKind(); | |
| 477 DCHECK_NE(kind, kTextNone); | |
acolwell (2013/10/08 15:45:24): nit: You shouldn't need this DCHECK
Matthew Heaney (Chromium) (2013/10/13 05:30:17): Done.
| 478 | |
| 479 std::string title = stream->GetMetadata("title"); | |
| 480 std::string language = stream->GetMetadata("language"); | |
| 481 | |
| 482 host_->AddTextStream(stream, kind, title, language); | |
| 483 } | |
| 484 } | |
| 485 | |
| 412 // Helper for calculating the bitrate of the media based on information stored | 486 // Helper for calculating the bitrate of the media based on information stored |
| 413 // in |format_context| or failing that the size and duration of the media. | 487 // in |format_context| or failing that the size and duration of the media. |
| 414 // | 488 // |
| 415 // Returns 0 if a bitrate could not be determined. | 489 // Returns 0 if a bitrate could not be determined. |
| 416 static int CalculateBitrate( | 490 static int CalculateBitrate( |
| 417 AVFormatContext* format_context, | 491 AVFormatContext* format_context, |
| 418 const base::TimeDelta& duration, | 492 const base::TimeDelta& duration, |
| 419 int64 filesize_in_bytes) { | 493 int64 filesize_in_bytes) { |
| 420 // If there is a bitrate set on the container, use it. | 494 // If there is a bitrate set on the container, use it. |
| 421 if (format_context->bit_rate > 0) | 495 if (format_context->bit_rate > 0) |
| (...skipping 98 matching lines...) | |
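The skipped body of CalculateBitrate() ultimately estimates the bitrate from file size and duration when neither the container nor the streams report one. A minimal sketch of that final fallback only, assumed from the surrounding comments rather than copied from the skipped code:

```cpp
#include <cstdint>

// Last-resort bitrate estimate: total bits divided by duration in seconds.
// Returns 0 if either input makes the estimate meaningless.
static int EstimateBitrateFromSize(int64_t filesize_in_bytes,
                                   double duration_in_seconds) {
  if (filesize_in_bytes <= 0 || duration_in_seconds <= 0.0)
    return 0;
  double bits = static_cast<double>(filesize_in_bytes) * 8.0;
  return static_cast<int>(bits / duration_in_seconds);
}
```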
| 520 // Log the codec detected, whether it is supported or not. | 594 // Log the codec detected, whether it is supported or not. |
| 521 UMA_HISTOGRAM_SPARSE_SLOWLY("Media.DetectedVideoCodec", | 595 UMA_HISTOGRAM_SPARSE_SLOWLY("Media.DetectedVideoCodec", |
| 522 codec_context->codec_id); | 596 codec_context->codec_id); |
| 523 // Ensure the codec is supported. IsValidConfig() also checks that the | 597 // Ensure the codec is supported. IsValidConfig() also checks that the |
| 524 // frame size and visible size are valid. | 598 // frame size and visible size are valid. |
| 525 AVStreamToVideoDecoderConfig(stream, &video_config, false); | 599 AVStreamToVideoDecoderConfig(stream, &video_config, false); |
| 526 | 600 |
| 527 if (!video_config.IsValidConfig()) | 601 if (!video_config.IsValidConfig()) |
| 528 continue; | 602 continue; |
| 529 video_stream = stream; | 603 video_stream = stream; |
| 604 } else if (codec_type == AVMEDIA_TYPE_SUBTITLE) { | |
| 605 if (codec_context->codec_id != AV_CODEC_ID_WEBVTT) { | |
| 606 continue; | |
| 607 } | |
| 530 } else { | 608 } else { |
| 531 continue; | 609 continue; |
| 532 } | 610 } |
| 533 | 611 |
| 534 streams_[i] = new FFmpegDemuxerStream(this, stream); | 612 streams_[i] = new FFmpegDemuxerStream(this, stream); |
| 535 max_duration = std::max(max_duration, streams_[i]->duration()); | 613 max_duration = std::max(max_duration, streams_[i]->duration()); |
| 536 | 614 |
| 537 if (stream->first_dts != static_cast<int64_t>(AV_NOPTS_VALUE)) { | 615 if (stream->first_dts != static_cast<int64_t>(AV_NOPTS_VALUE)) { |
| 538 const base::TimeDelta first_dts = ConvertFromTimeBase( | 616 const base::TimeDelta first_dts = ConvertFromTimeBase( |
| 539 stream->time_base, stream->first_dts); | 617 stream->time_base, stream->first_dts); |
| 540 if (start_time_ == kNoTimestamp() || first_dts < start_time_) | 618 if (start_time_ == kNoTimestamp() || first_dts < start_time_) |
| 541 start_time_ = first_dts; | 619 start_time_ = first_dts; |
| 542 } | 620 } |
| 543 } | 621 } |
| 544 | 622 |
| 545 if (!audio_stream && !video_stream) { | 623 if (!audio_stream && !video_stream) { |
| 546 status_cb.Run(DEMUXER_ERROR_NO_SUPPORTED_STREAMS); | 624 status_cb.Run(DEMUXER_ERROR_NO_SUPPORTED_STREAMS); |
| 547 return; | 625 return; |
| 548 } | 626 } |
| 549 | 627 |
| 628 if (text_enabled_) | |
| 629 AddTextStreams(); | |
| 630 | |
| 550 if (format_context->duration != static_cast<int64_t>(AV_NOPTS_VALUE)) { | 631 if (format_context->duration != static_cast<int64_t>(AV_NOPTS_VALUE)) { |
| 551 // If there is a duration value in the container use that to find the | 632 // If there is a duration value in the container use that to find the |
| 552 // maximum between it and the duration from A/V streams. | 633 // maximum between it and the duration from A/V streams. |
| 553 const AVRational av_time_base = {1, AV_TIME_BASE}; | 634 const AVRational av_time_base = {1, AV_TIME_BASE}; |
| 554 max_duration = | 635 max_duration = |
| 555 std::max(max_duration, | 636 std::max(max_duration, |
| 556 ConvertFromTimeBase(av_time_base, format_context->duration)); | 637 ConvertFromTimeBase(av_time_base, format_context->duration)); |
| 557 } else { | 638 } else { |
| 558 // The duration is unknown, in which case this is likely a live stream. | 639 // The duration is unknown, in which case this is likely a live stream. |
| 559 max_duration = kInfiniteDuration(); | 640 max_duration = kInfiniteDuration(); |
| (...skipping 192 matching lines...) | |
| 752 new_packet->pos = packet->pos; | 833 new_packet->pos = packet->pos; |
| 753 new_packet->duration = packet->duration; | 834 new_packet->duration = packet->duration; |
| 754 new_packet->convergence_duration = packet->convergence_duration; | 835 new_packet->convergence_duration = packet->convergence_duration; |
| 755 new_packet->flags = packet->flags; | 836 new_packet->flags = packet->flags; |
| 756 new_packet->stream_index = packet->stream_index; | 837 new_packet->stream_index = packet->stream_index; |
| 757 | 838 |
| 758 packet.swap(new_packet); | 839 packet.swap(new_packet); |
| 759 } | 840 } |
| 760 | 841 |
| 761 FFmpegDemuxerStream* demuxer_stream = streams_[packet->stream_index]; | 842 FFmpegDemuxerStream* demuxer_stream = streams_[packet->stream_index]; |
| 762 demuxer_stream->EnqueuePacket(packet.Pass()); | 843 |
| 844 if (demuxer_stream->type() != DemuxerStream::TEXT || text_enabled_) | |
acolwell (2013/10/08 15:45:24): nit: It seems like the text_enabled_ check should
Matthew Heaney (Chromium) (2013/10/13 05:30:17): Done.
| 845 demuxer_stream->EnqueuePacket(packet.Pass()); | |
| 763 } | 846 } |
| 764 | 847 |
| 765 // Keep reading until we've reached capacity. | 848 // Keep reading until we've reached capacity. |
| 766 ReadFrameIfNeeded(); | 849 ReadFrameIfNeeded(); |
| 767 } | 850 } |
| 768 | 851 |
| 769 void FFmpegDemuxer::OnDataSourceStopped(const base::Closure& callback) { | 852 void FFmpegDemuxer::OnDataSourceStopped(const base::Closure& callback) { |
| 770 // This will block until all tasks complete. Note that after this returns it's | 853 // This will block until all tasks complete. Note that after this returns it's |
| 771 // possible for reply tasks (e.g., OnReadFrameDone()) to be queued on this | 854 // possible for reply tasks (e.g., OnReadFrameDone()) to be queued on this |
| 772 // thread. Each of the reply task methods must check whether we've stopped the | 855 // thread. Each of the reply task methods must check whether we've stopped the |
| (...skipping 61 matching lines...) | |
| 834 } | 917 } |
| 835 for (size_t i = 0; i < buffered.size(); ++i) | 918 for (size_t i = 0; i < buffered.size(); ++i) |
| 836 host_->AddBufferedTimeRange(buffered.start(i), buffered.end(i)); | 919 host_->AddBufferedTimeRange(buffered.start(i), buffered.end(i)); |
| 837 } | 920 } |
| 838 | 921 |
| 839 void FFmpegDemuxer::OnDataSourceError() { | 922 void FFmpegDemuxer::OnDataSourceError() { |
| 840 host_->OnDemuxerError(PIPELINE_ERROR_READ); | 923 host_->OnDemuxerError(PIPELINE_ERROR_READ); |
| 841 } | 924 } |
| 842 | 925 |
| 843 } // namespace media | 926 } // namespace media |