Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "media/filters/ffmpeg_demuxer.h" | 5 #include "media/filters/ffmpeg_demuxer.h" |
| 6 | 6 |
| 7 #include <algorithm> | 7 #include <algorithm> |
| 8 #include <string> | 8 #include <string> |
| 9 | 9 |
| 10 #include "base/bind.h" | 10 #include "base/bind.h" |
| (...skipping 19 matching lines...) Expand all Loading... | |
| 30 // FFmpegDemuxerStream | 30 // FFmpegDemuxerStream |
| 31 // | 31 // |
| 32 FFmpegDemuxerStream::FFmpegDemuxerStream( | 32 FFmpegDemuxerStream::FFmpegDemuxerStream( |
| 33 FFmpegDemuxer* demuxer, | 33 FFmpegDemuxer* demuxer, |
| 34 AVStream* stream) | 34 AVStream* stream) |
| 35 : demuxer_(demuxer), | 35 : demuxer_(demuxer), |
| 36 stream_(stream), | 36 stream_(stream), |
| 37 type_(UNKNOWN), | 37 type_(UNKNOWN), |
| 38 stopped_(false), | 38 stopped_(false), |
| 39 last_packet_timestamp_(kNoTimestamp()), | 39 last_packet_timestamp_(kNoTimestamp()), |
| 40 bitstream_converter_enabled_(false) { | 40 bitstream_converter_enabled_(false) { |
|
Ami GONE FROM CHROMIUM
2012/09/27 03:24:45
Can you revert this and the rest of the madness fr
| |
| 41 DCHECK(demuxer_); | 41 DCHECK(demuxer_); |
| 42 | 42 |
| 43 // Determine our media format. | 43 // Determine our media format. |
| 44 switch (stream->codec->codec_type) { | 44 switch (stream->codec->codec_type) { |
| 45 case AVMEDIA_TYPE_AUDIO: | 45 case AVMEDIA_TYPE_AUDIO: |
| 46 type_ = AUDIO; | 46 type_ = AUDIO; |
| 47 AVCodecContextToAudioDecoderConfig(stream->codec, &audio_config_); | 47 AVCodecContextToAudioDecoderConfig(stream->codec, &audio_config_); |
| 48 break; | 48 break; |
| 49 case AVMEDIA_TYPE_VIDEO: | 49 case AVMEDIA_TYPE_VIDEO: |
| 50 type_ = VIDEO; | 50 type_ = VIDEO; |
| (...skipping 204 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 255 } | 255 } |
| 256 | 256 |
| 257 // | 257 // |
| 258 // FFmpegDemuxer | 258 // FFmpegDemuxer |
| 259 // | 259 // |
| 260 FFmpegDemuxer::FFmpegDemuxer( | 260 FFmpegDemuxer::FFmpegDemuxer( |
| 261 const scoped_refptr<base::MessageLoopProxy>& message_loop, | 261 const scoped_refptr<base::MessageLoopProxy>& message_loop, |
| 262 const scoped_refptr<DataSource>& data_source) | 262 const scoped_refptr<DataSource>& data_source) |
| 263 : host_(NULL), | 263 : host_(NULL), |
| 264 message_loop_(message_loop), | 264 message_loop_(message_loop), |
| 265 format_context_(NULL), | |
| 266 data_source_(data_source), | 265 data_source_(data_source), |
| 267 read_event_(false, false), | 266 read_event_(false, false), |
| 268 read_has_failed_(false), | 267 read_has_failed_(false), |
| 269 last_read_bytes_(0), | 268 last_read_bytes_(0), |
| 270 read_position_(0), | 269 read_position_(0), |
| 271 bitrate_(0), | 270 bitrate_(0), |
| 272 start_time_(kNoTimestamp()), | 271 start_time_(kNoTimestamp()), |
| 273 audio_disabled_(false), | 272 audio_disabled_(false), |
| 274 duration_known_(false) { | 273 duration_known_(false) { |
| 275 DCHECK(message_loop_); | 274 DCHECK(message_loop_); |
| 276 DCHECK(data_source_); | 275 DCHECK(data_source_); |
| 277 } | 276 } |
| 278 | 277 |
| 279 FFmpegDemuxer::~FFmpegDemuxer() { | 278 FFmpegDemuxer::~FFmpegDemuxer() {} |
| 280 // In this destructor, we clean up resources held by FFmpeg. It is ugly to | |
| 281 // close the codec contexts here because the corresponding codecs are opened | |
| 282 // in the decoder filters. By reaching this point, all filters should have | |
| 283 // stopped, so this is the only safe place to do the global clean up. | |
| 284 // TODO(hclam): close the codecs in the corresponding decoders. | |
| 285 if (!format_context_) | |
| 286 return; | |
| 287 | |
| 288 DestroyAVFormatContext(format_context_); | |
| 289 format_context_ = NULL; | |
|
Ami GONE FROM CHROMIUM
2012/09/27 03:24:45
Yay!
| |
| 290 } | |
| 291 | 279 |
| 292 void FFmpegDemuxer::PostDemuxTask() { | 280 void FFmpegDemuxer::PostDemuxTask() { |
| 293 message_loop_->PostTask(FROM_HERE, | 281 message_loop_->PostTask(FROM_HERE, |
| 294 base::Bind(&FFmpegDemuxer::DemuxTask, this)); | 282 base::Bind(&FFmpegDemuxer::DemuxTask, this)); |
| 295 } | 283 } |
| 296 | 284 |
| 297 void FFmpegDemuxer::Stop(const base::Closure& callback) { | 285 void FFmpegDemuxer::Stop(const base::Closure& callback) { |
| 298 // Post a task to notify the streams to stop as well. | 286 // Post a task to notify the streams to stop as well. |
| 299 message_loop_->PostTask(FROM_HERE, | 287 message_loop_->PostTask(FROM_HERE, |
| 300 base::Bind(&FFmpegDemuxer::StopTask, this, callback)); | 288 base::Bind(&FFmpegDemuxer::StopTask, this, callback)); |
| (...skipping 36 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 337 return *iter; | 325 return *iter; |
| 338 } | 326 } |
| 339 } | 327 } |
| 340 return NULL; | 328 return NULL; |
| 341 } | 329 } |
| 342 | 330 |
| 343 base::TimeDelta FFmpegDemuxer::GetStartTime() const { | 331 base::TimeDelta FFmpegDemuxer::GetStartTime() const { |
| 344 return start_time_; | 332 return start_time_; |
| 345 } | 333 } |
| 346 | 334 |
| 347 size_t FFmpegDemuxer::Read(size_t size, uint8* data) { | 335 int FFmpegDemuxer::Read(int size, uint8* data) { |
| 348 DCHECK(host_); | 336 DCHECK(host_); |
| 349 DCHECK(data_source_); | 337 DCHECK(data_source_); |
| 350 | 338 |
| 351 // If read has ever failed, return with an error. | 339 // If read has ever failed, return with an error. |
| 352 // TODO(hclam): use a more meaningful constant as error. | 340 // TODO(hclam): use a more meaningful constant as error. |
| 353 if (read_has_failed_) | 341 if (read_has_failed_) |
| 354 return AVERROR(EIO); | 342 return AVERROR(EIO); |
| 355 | 343 |
| 356 // Even though FFmpeg defines AVERROR_EOF, it's not to be used with I/O | 344 // Even though FFmpeg defines AVERROR_EOF, it's not to be used with I/O |
| 357 // routines. Instead return 0 for any read at or past EOF. | 345 // routines. Instead return 0 for any read at or past EOF. |
| (...skipping 95 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 453 | 441 |
| 454 void FFmpegDemuxer::InitializeTask(DemuxerHost* host, | 442 void FFmpegDemuxer::InitializeTask(DemuxerHost* host, |
| 455 const PipelineStatusCB& status_cb) { | 443 const PipelineStatusCB& status_cb) { |
| 456 DCHECK(message_loop_->BelongsToCurrentThread()); | 444 DCHECK(message_loop_->BelongsToCurrentThread()); |
| 457 host_ = host; | 445 host_ = host; |
| 458 | 446 |
| 459 // TODO(scherkus): DataSource should have a host by this point, | 447 // TODO(scherkus): DataSource should have a host by this point, |
| 460 // see http://crbug.com/122071 | 448 // see http://crbug.com/122071 |
| 461 data_source_->set_host(host); | 449 data_source_->set_host(host); |
| 462 | 450 |
| 463 // Add ourself to Protocol list and get our unique key. | 451 glue_.reset(new FFmpegGlue(this)); |
| 464 std::string key = FFmpegGlue::GetInstance()->AddProtocol(this); | 452 AVFormatContext* format_context = glue_->format_context(); |
| 465 | |
| 466 // Open FFmpeg AVFormatContext. | |
| 467 DCHECK(!format_context_); | |
| 468 AVFormatContext* context = avformat_alloc_context(); | |
| 469 | 453 |
| 470 // Disable ID3v1 tag reading to avoid costly seeks to end of file for data we | 454 // Disable ID3v1 tag reading to avoid costly seeks to end of file for data we |
| 471 // don't use. FFmpeg will only read ID3v1 tags if no other metadata is | 455 // don't use. FFmpeg will only read ID3v1 tags if no other metadata is |
| 472 // available, so add a metadata entry to ensure some is always present. | 456 // available, so add a metadata entry to ensure some is always present. |
| 473 av_dict_set(&context->metadata, "skip_id3v1_tags", "", 0); | 457 av_dict_set(&format_context->metadata, "skip_id3v1_tags", "", 0); |
| 474 | 458 |
| 475 int result = avformat_open_input(&context, key.c_str(), NULL, NULL); | 459 int result = glue_->OpenContext(); |
| 476 | |
| 477 // Remove ourself from protocol list. | |
| 478 FFmpegGlue::GetInstance()->RemoveProtocol(this); | |
| 479 | |
| 480 if (result < 0) { | 460 if (result < 0) { |
| 481 status_cb.Run(DEMUXER_ERROR_COULD_NOT_OPEN); | 461 status_cb.Run(DEMUXER_ERROR_COULD_NOT_OPEN); |
| 482 return; | 462 return; |
| 483 } | 463 } |
| 484 | 464 |
| 485 DCHECK(context); | |
| 486 format_context_ = context; | |
| 487 | |
| 488 // Fully initialize AVFormatContext by parsing the stream a little. | 465 // Fully initialize AVFormatContext by parsing the stream a little. |
| 489 result = avformat_find_stream_info(format_context_, NULL); | 466 result = avformat_find_stream_info(format_context, NULL); |
| 490 if (result < 0) { | 467 if (result < 0) { |
| 491 status_cb.Run(DEMUXER_ERROR_COULD_NOT_PARSE); | 468 status_cb.Run(DEMUXER_ERROR_COULD_NOT_PARSE); |
| 492 return; | 469 return; |
| 493 } | 470 } |
| 494 | 471 |
| 495 // Create demuxer stream entries for each possible AVStream. | 472 // Create demuxer stream entries for each possible AVStream. |
| 496 streams_.resize(format_context_->nb_streams); | 473 streams_.resize(format_context->nb_streams); |
| 497 bool found_audio_stream = false; | 474 bool found_audio_stream = false; |
| 498 bool found_video_stream = false; | 475 bool found_video_stream = false; |
| 499 | 476 |
| 500 base::TimeDelta max_duration; | 477 base::TimeDelta max_duration; |
| 501 for (size_t i = 0; i < format_context_->nb_streams; ++i) { | 478 for (size_t i = 0; i < format_context->nb_streams; ++i) { |
| 502 AVCodecContext* codec_context = format_context_->streams[i]->codec; | 479 AVCodecContext* codec_context = format_context->streams[i]->codec; |
| 503 AVMediaType codec_type = codec_context->codec_type; | 480 AVMediaType codec_type = codec_context->codec_type; |
| 504 | 481 |
| 505 if (codec_type == AVMEDIA_TYPE_AUDIO) { | 482 if (codec_type == AVMEDIA_TYPE_AUDIO) { |
| 506 if (found_audio_stream) | 483 if (found_audio_stream) |
| 507 continue; | 484 continue; |
| 508 // Ensure the codec is supported. | 485 // Ensure the codec is supported. |
| 509 if (CodecIDToAudioCodec(codec_context->codec_id) == kUnknownAudioCodec) | 486 if (CodecIDToAudioCodec(codec_context->codec_id) == kUnknownAudioCodec) |
| 510 continue; | 487 continue; |
| 511 found_audio_stream = true; | 488 found_audio_stream = true; |
| 512 } else if (codec_type == AVMEDIA_TYPE_VIDEO) { | 489 } else if (codec_type == AVMEDIA_TYPE_VIDEO) { |
| 513 if (found_video_stream) | 490 if (found_video_stream) |
| 514 continue; | 491 continue; |
| 515 // Ensure the codec is supported. | 492 // Ensure the codec is supported. |
| 516 if (CodecIDToVideoCodec(codec_context->codec_id) == kUnknownVideoCodec) | 493 if (CodecIDToVideoCodec(codec_context->codec_id) == kUnknownVideoCodec) |
| 517 continue; | 494 continue; |
| 518 found_video_stream = true; | 495 found_video_stream = true; |
| 519 } else { | 496 } else { |
| 520 continue; | 497 continue; |
| 521 } | 498 } |
| 522 | 499 |
| 523 AVStream* stream = format_context_->streams[i]; | 500 AVStream* stream = format_context->streams[i]; |
| 524 scoped_refptr<FFmpegDemuxerStream> demuxer_stream( | 501 scoped_refptr<FFmpegDemuxerStream> demuxer_stream( |
| 525 new FFmpegDemuxerStream(this, stream)); | 502 new FFmpegDemuxerStream(this, stream)); |
| 526 | 503 |
| 527 streams_[i] = demuxer_stream; | 504 streams_[i] = demuxer_stream; |
| 528 max_duration = std::max(max_duration, demuxer_stream->duration()); | 505 max_duration = std::max(max_duration, demuxer_stream->duration()); |
| 529 | 506 |
| 530 if (stream->first_dts != static_cast<int64_t>(AV_NOPTS_VALUE)) { | 507 if (stream->first_dts != static_cast<int64_t>(AV_NOPTS_VALUE)) { |
| 531 const base::TimeDelta first_dts = ConvertFromTimeBase( | 508 const base::TimeDelta first_dts = ConvertFromTimeBase( |
| 532 stream->time_base, stream->first_dts); | 509 stream->time_base, stream->first_dts); |
| 533 if (start_time_ == kNoTimestamp() || first_dts < start_time_) | 510 if (start_time_ == kNoTimestamp() || first_dts < start_time_) |
| 534 start_time_ = first_dts; | 511 start_time_ = first_dts; |
| 535 } | 512 } |
| 536 } | 513 } |
| 537 | 514 |
| 538 if (!found_audio_stream && !found_video_stream) { | 515 if (!found_audio_stream && !found_video_stream) { |
| 539 status_cb.Run(DEMUXER_ERROR_NO_SUPPORTED_STREAMS); | 516 status_cb.Run(DEMUXER_ERROR_NO_SUPPORTED_STREAMS); |
| 540 return; | 517 return; |
| 541 } | 518 } |
| 542 | 519 |
| 543 if (format_context_->duration != static_cast<int64_t>(AV_NOPTS_VALUE)) { | 520 if (format_context->duration != static_cast<int64_t>(AV_NOPTS_VALUE)) { |
| 544 // If there is a duration value in the container use that to find the | 521 // If there is a duration value in the container use that to find the |
| 545 // maximum between it and the duration from A/V streams. | 522 // maximum between it and the duration from A/V streams. |
| 546 const AVRational av_time_base = {1, AV_TIME_BASE}; | 523 const AVRational av_time_base = {1, AV_TIME_BASE}; |
| 547 max_duration = | 524 max_duration = |
| 548 std::max(max_duration, | 525 std::max(max_duration, |
| 549 ConvertFromTimeBase(av_time_base, format_context_->duration)); | 526 ConvertFromTimeBase(av_time_base, format_context->duration)); |
| 550 } else { | 527 } else { |
| 551 // The duration is unknown, in which case this is likely a live stream. | 528 // The duration is unknown, in which case this is likely a live stream. |
| 552 max_duration = kInfiniteDuration(); | 529 max_duration = kInfiniteDuration(); |
| 553 } | 530 } |
| 554 | 531 |
| 555 // Some demuxers, like WAV, do not put timestamps on their frames. We | 532 // Some demuxers, like WAV, do not put timestamps on their frames. We |
| 556 // assume the start time is 0. | 533 // assume the start time is 0. |
| 557 if (start_time_ == kNoTimestamp()) | 534 if (start_time_ == kNoTimestamp()) |
| 558 start_time_ = base::TimeDelta(); | 535 start_time_ = base::TimeDelta(); |
| 559 | 536 |
| 560 // Good to go: set the duration and bitrate and notify we're done | 537 // Good to go: set the duration and bitrate and notify we're done |
| 561 // initializing. | 538 // initializing. |
| 562 host_->SetDuration(max_duration); | 539 host_->SetDuration(max_duration); |
| 563 duration_known_ = (max_duration != kInfiniteDuration()); | 540 duration_known_ = (max_duration != kInfiniteDuration()); |
| 564 | 541 |
| 565 int64 filesize_in_bytes = 0; | 542 int64 filesize_in_bytes = 0; |
| 566 GetSize(&filesize_in_bytes); | 543 GetSize(&filesize_in_bytes); |
| 567 bitrate_ = CalculateBitrate(format_context_, max_duration, filesize_in_bytes); | 544 bitrate_ = CalculateBitrate(format_context, max_duration, filesize_in_bytes); |
| 568 if (bitrate_ > 0) | 545 if (bitrate_ > 0) |
| 569 data_source_->SetBitrate(bitrate_); | 546 data_source_->SetBitrate(bitrate_); |
| 570 | 547 |
| 571 status_cb.Run(PIPELINE_OK); | 548 status_cb.Run(PIPELINE_OK); |
| 572 } | 549 } |
| 573 | 550 |
| 574 void FFmpegDemuxer::SeekTask(base::TimeDelta time, const PipelineStatusCB& cb) { | 551 void FFmpegDemuxer::SeekTask(base::TimeDelta time, const PipelineStatusCB& cb) { |
| 575 DCHECK(message_loop_->BelongsToCurrentThread()); | 552 DCHECK(message_loop_->BelongsToCurrentThread()); |
| 576 | 553 |
| 577 // Tell streams to flush buffers due to seeking. | 554 // Tell streams to flush buffers due to seeking. |
| 578 StreamVector::iterator iter; | 555 StreamVector::iterator iter; |
| 579 for (iter = streams_.begin(); iter != streams_.end(); ++iter) { | 556 for (iter = streams_.begin(); iter != streams_.end(); ++iter) { |
| 580 if (*iter) | 557 if (*iter) |
| 581 (*iter)->FlushBuffers(); | 558 (*iter)->FlushBuffers(); |
| 582 } | 559 } |
| 583 | 560 |
| 584 // Always seek to a timestamp less than or equal to the desired timestamp. | 561 // Always seek to a timestamp less than or equal to the desired timestamp. |
| 585 int flags = AVSEEK_FLAG_BACKWARD; | 562 int flags = AVSEEK_FLAG_BACKWARD; |
| 586 | 563 |
| 587 // Passing -1 as our stream index lets FFmpeg pick a default stream. FFmpeg | 564 // Passing -1 as our stream index lets FFmpeg pick a default stream. FFmpeg |
| 588 // will attempt to use the lowest-index video stream, if present, followed by | 565 // will attempt to use the lowest-index video stream, if present, followed by |
| 589 // the lowest-index audio stream. | 566 // the lowest-index audio stream. |
| 590 if (av_seek_frame(format_context_, -1, time.InMicroseconds(), flags) < 0) { | 567 if (av_seek_frame(glue_->format_context(), -1, time.InMicroseconds(), |
| 568 flags) < 0) { | |
| 591 // Use VLOG(1) instead of NOTIMPLEMENTED() to prevent the message being | 569 // Use VLOG(1) instead of NOTIMPLEMENTED() to prevent the message being |
| 592 // captured from stdout and contaminating testing. | 570 // captured from stdout and contaminating testing. |
| 593 // TODO(scherkus): Implement this properly and signal error (BUG=23447). | 571 // TODO(scherkus): Implement this properly and signal error (BUG=23447). |
| 594 VLOG(1) << "Not implemented"; | 572 VLOG(1) << "Not implemented"; |
| 595 } | 573 } |
| 596 | 574 |
| 597 // Notify we're finished seeking. | 575 // Notify we're finished seeking. |
| 598 cb.Run(PIPELINE_OK); | 576 cb.Run(PIPELINE_OK); |
| 599 } | 577 } |
| 600 | 578 |
| 601 void FFmpegDemuxer::DemuxTask() { | 579 void FFmpegDemuxer::DemuxTask() { |
| 602 DCHECK(message_loop_->BelongsToCurrentThread()); | 580 DCHECK(message_loop_->BelongsToCurrentThread()); |
| 603 | 581 |
| 604 // Make sure we have work to do before demuxing. | 582 // Make sure we have work to do before demuxing. |
| 605 if (!StreamsHavePendingReads()) { | 583 if (!StreamsHavePendingReads()) { |
| 606 return; | 584 return; |
| 607 } | 585 } |
| 608 | 586 |
| 609 // Allocate and read an AVPacket from the media. | 587 // Allocate and read an AVPacket from the media. |
| 610 scoped_ptr_malloc<AVPacket, ScopedPtrAVFreePacket> packet(new AVPacket()); | 588 scoped_ptr_malloc<AVPacket, ScopedPtrAVFreePacket> packet(new AVPacket()); |
| 611 int result = av_read_frame(format_context_, packet.get()); | 589 int result = av_read_frame(glue_->format_context(), packet.get()); |
| 612 if (result < 0) { | 590 if (result < 0) { |
| 613 // Update the duration based on the audio stream if | 591 // Update the duration based on the audio stream if |
| 614 // it was previously unknown http://crbug.com/86830 | 592 // it was previously unknown http://crbug.com/86830 |
| 615 if (!duration_known_) { | 593 if (!duration_known_) { |
| 616 // Search streams for AUDIO one. | 594 // Search streams for AUDIO one. |
| 617 for (StreamVector::iterator iter = streams_.begin(); | 595 for (StreamVector::iterator iter = streams_.begin(); |
| 618 iter != streams_.end(); | 596 iter != streams_.end(); |
| 619 ++iter) { | 597 ++iter) { |
| 620 if (*iter && (*iter)->type() == DemuxerStream::AUDIO) { | 598 if (*iter && (*iter)->type() == DemuxerStream::AUDIO) { |
| 621 base::TimeDelta duration = (*iter)->GetElapsedTime(); | 599 base::TimeDelta duration = (*iter)->GetElapsedTime(); |
| (...skipping 107 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 729 } else if (audio) { | 707 } else if (audio) { |
| 730 buffered = audio->GetBufferedRanges(); | 708 buffered = audio->GetBufferedRanges(); |
| 731 } else if (video) { | 709 } else if (video) { |
| 732 buffered = video->GetBufferedRanges(); | 710 buffered = video->GetBufferedRanges(); |
| 733 } | 711 } |
| 734 for (size_t i = 0; i < buffered.size(); ++i) | 712 for (size_t i = 0; i < buffered.size(); ++i) |
| 735 host_->AddBufferedTimeRange(buffered.start(i), buffered.end(i)); | 713 host_->AddBufferedTimeRange(buffered.start(i), buffered.end(i)); |
| 736 } | 714 } |
| 737 | 715 |
| 738 } // namespace media | 716 } // namespace media |
| OLD | NEW |