Index: media/filters/ffmpeg_video_decoder.cc
diff --git a/media/filters/ffmpeg_video_decoder.cc b/media/filters/ffmpeg_video_decoder.cc
index 14852fff22ebb1b8362de58038356f7fcbeb5371..cf959f7fb97f91dddeedfcd47c0b4ad2f4d52063 100644
--- a/media/filters/ffmpeg_video_decoder.cc
+++ b/media/filters/ffmpeg_video_decoder.cc
@@ -62,8 +62,7 @@ FFmpegVideoDecoder::FFmpegVideoDecoder(
       state_(kUninitialized),
       codec_context_(NULL),
       av_frame_(NULL),
-      demuxer_stream_(NULL) {
-}
+      demuxer_stream_(NULL) {}
 
 int FFmpegVideoDecoder::GetVideoBuffer(AVCodecContext* codec_context,
                                        AVFrame* frame) {
@@ -73,8 +72,7 @@ int FFmpegVideoDecoder::GetVideoBuffer(AVCodecContext* codec_context,
   // updated width/height/pix_fmt, which can change for adaptive
   // content.
   VideoFrame::Format format = PixelFormatToVideoFormat(codec_context->pix_fmt);
-  if (format == VideoFrame::INVALID)
-    return AVERROR(EINVAL);
+  if (format == VideoFrame::INVALID) return AVERROR(EINVAL);
   DCHECK(format == VideoFrame::YV12 || format == VideoFrame::YV16);
 
   gfx::Size size(codec_context->width, codec_context->height);
@@ -84,8 +82,7 @@ int FFmpegVideoDecoder::GetVideoBuffer(AVCodecContext* codec_context,
 
   gfx::Size natural_size;
   if (codec_context->sample_aspect_ratio.num > 0) {
-    natural_size = GetNaturalSize(size,
-                                  codec_context->sample_aspect_ratio.num,
+    natural_size = GetNaturalSize(size, codec_context->sample_aspect_ratio.num,
                                   codec_context->sample_aspect_ratio.den);
   } else {
     natural_size = demuxer_stream_->video_decoder_config().natural_size();
@@ -94,9 +91,8 @@ int FFmpegVideoDecoder::GetVideoBuffer(AVCodecContext* codec_context,
   if (!VideoFrame::IsValidConfig(format, size, gfx::Rect(size), natural_size))
     return AVERROR(EINVAL);
 
-  scoped_refptr<VideoFrame> video_frame =
-      VideoFrame::CreateFrame(format, size, gfx::Rect(size), natural_size,
-                              kNoTimestamp());
+  scoped_refptr<VideoFrame> video_frame = VideoFrame::CreateFrame(
+      format, size, gfx::Rect(size), natural_size, kNoTimestamp());
 
   for (int i = 0; i < 3; i++) {
     frame->base[i] = video_frame->data(i);
@@ -107,8 +103,8 @@ int FFmpegVideoDecoder::GetVideoBuffer(AVCodecContext* codec_context,
   frame->opaque = NULL;
   video_frame.swap(reinterpret_cast<VideoFrame**>(&frame->opaque));
   frame->type = FF_BUFFER_TYPE_USER;
-  frame->pkt_pts = codec_context->pkt ? codec_context->pkt->pts :
-                                        AV_NOPTS_VALUE;
+  frame->pkt_pts =
+      codec_context->pkt ? codec_context->pkt->pts : AV_NOPTS_VALUE;
   frame->width = codec_context->width;
   frame->height = codec_context->height;
   frame->format = codec_context->pix_fmt;
@@ -183,8 +179,7 @@ void FFmpegVideoDecoder::Reset(const base::Closure& closure) {
   reset_cb_ = BindToCurrentLoop(closure);
 
   // Defer the reset if a read is pending.
-  if (!read_cb_.is_null())
-    return;
+  if (!read_cb_.is_null()) return;
 
   DoReset();
 }
@@ -201,11 +196,9 @@ void FFmpegVideoDecoder::Stop(const base::Closure& closure) {
   DCHECK(message_loop_->BelongsToCurrentThread());
   base::ScopedClosureRunner runner(BindToCurrentLoop(closure));
 
-  if (state_ == kUninitialized)
-    return;
+  if (state_ == kUninitialized) return;
 
-  if (!read_cb_.is_null())
-    base::ResetAndReturn(&read_cb_).Run(kOk, NULL);
+  if (!read_cb_.is_null()) base::ResetAndReturn(&read_cb_).Run(kOk, NULL);
 
   ReleaseFFmpegResources();
   state_ = kUninitialized;
@@ -223,20 +216,18 @@ void FFmpegVideoDecoder::ReadFromDemuxerStream() {
   DCHECK_NE(state_, kError);
   DCHECK(!read_cb_.is_null());
 
-  demuxer_stream_->Read(base::Bind(
-      &FFmpegVideoDecoder::BufferReady, weak_this_));
+  demuxer_stream_->Read(
+      base::Bind(&FFmpegVideoDecoder::BufferReady, weak_this_));
 }
 
 void FFmpegVideoDecoder::BufferReady(
-    DemuxerStream::Status status,
-    const scoped_refptr<DecoderBuffer>& buffer) {
+    DemuxerStream::Status status, const scoped_refptr<DecoderBuffer>& buffer) {
   DCHECK(message_loop_->BelongsToCurrentThread());
   DCHECK_NE(state_, kDecodeFinished);
   DCHECK_NE(state_, kError);
   DCHECK_EQ(status != DemuxerStream::kOk, !buffer.get()) << status;
 
-  if (state_ == kUninitialized)
-    return;
+  if (state_ == kUninitialized) return;
 
   DCHECK(!read_cb_.is_null());
 
@@ -284,7 +275,7 @@ void FFmpegVideoDecoder::DecodeBuffer(
   // These are the possible state transitions.
   //
   // kNormal -> kFlushCodec:
-  //     When buffer->IsEndOfStream() is first true.
+  //     When buffer->is_end_of_stream() is first true.
   // kNormal -> kError:
   //     A decoding error occurs and decoding needs to stop.
   // kFlushCodec -> kDecodeFinished:
@@ -295,7 +286,7 @@ void FFmpegVideoDecoder::DecodeBuffer(
   //     Any time Reset() is called.
 
   // Transition to kFlushCodec on the first end of stream buffer.
-  if (state_ == kNormal && buffer->IsEndOfStream()) {
+  if (state_ == kNormal && buffer->is_end_of_stream()) {
     state_ = kFlushCodec;
   }
 
@@ -307,15 +298,15 @@ void FFmpegVideoDecoder::DecodeBuffer(
   }
 
   // Any successful decode counts!
-  if (!buffer->IsEndOfStream() && buffer->GetDataSize() > 0) {
+  if (!buffer->is_end_of_stream() && buffer->get_data_size() > 0) {
     PipelineStatistics statistics;
-    statistics.video_bytes_decoded = buffer->GetDataSize();
+    statistics.video_bytes_decoded = buffer->get_data_size();
     statistics_cb_.Run(statistics);
   }
 
   if (!video_frame.get()) {
     if (state_ == kFlushCodec) {
-      DCHECK(buffer->IsEndOfStream());
+      DCHECK(buffer->is_end_of_stream());
       state_ = kDecodeFinished;
       base::ResetAndReturn(&read_cb_).Run(kOk, VideoFrame::CreateEmptyFrame());
       return;
@@ -328,9 +319,8 @@ void FFmpegVideoDecoder::DecodeBuffer(
   base::ResetAndReturn(&read_cb_).Run(kOk, video_frame);
 }
 
-bool FFmpegVideoDecoder::Decode(
-    const scoped_refptr<DecoderBuffer>& buffer,
-    scoped_refptr<VideoFrame>* video_frame) {
+bool FFmpegVideoDecoder::Decode(const scoped_refptr<DecoderBuffer>& buffer,
+                                scoped_refptr<VideoFrame>* video_frame) {
   DCHECK(video_frame);
 
   // Reset frame to default values.
@@ -340,15 +330,15 @@ bool FFmpegVideoDecoder::Decode(
   // Due to FFmpeg API changes we no longer have const read-only pointers.
   AVPacket packet;
   av_init_packet(&packet);
-  if (buffer->IsEndOfStream()) {
+  if (buffer->is_end_of_stream()) {
     packet.data = NULL;
     packet.size = 0;
   } else {
-    packet.data = const_cast<uint8*>(buffer->GetData());
-    packet.size = buffer->GetDataSize();
+    packet.data = const_cast<uint8*>(buffer->get_data());
+    packet.size = buffer->get_data_size();
 
     // Let FFmpeg handle presentation timestamp reordering.
-    codec_context_->reordered_opaque = buffer->GetTimestamp().InMicroseconds();
+    codec_context_->reordered_opaque = buffer->get_timestamp().InMicroseconds();
 
     // This is for codecs not using get_buffer to initialize
     // |av_frame_->reordered_opaque|
@@ -356,13 +346,12 @@ bool FFmpegVideoDecoder::Decode(
   }
   int frame_decoded = 0;
-  int result = avcodec_decode_video2(codec_context_,
-                                     av_frame_,
-                                     &frame_decoded,
-                                     &packet);
+  int result =
+      avcodec_decode_video2(codec_context_, av_frame_, &frame_decoded, &packet);
 
   // Log the problem if we can't decode a video frame and exit early.
   if (result < 0) {
-    LOG(ERROR) << "Error decoding video: " << buffer->AsHumanReadableString();
+    LOG(ERROR)
+        << "Error decoding video: " << buffer->as_human_readable_string();
     *video_frame = NULL;
     return false;
   }