OLD | NEW |
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "media/filters/ffmpeg_audio_decoder.h" | 5 #include "media/filters/ffmpeg_audio_decoder.h" |
6 | 6 |
7 #include <stdint.h> | 7 #include <stdint.h> |
8 | 8 |
9 #include "base/callback_helpers.h" | 9 #include "base/callback_helpers.h" |
10 #include "base/single_thread_task_runner.h" | 10 #include "base/single_thread_task_runner.h" |
(...skipping 24 matching lines...) Expand all Loading... |
35 // Return the number of channels from the data in |frame|. | 35 // Return the number of channels from the data in |frame|. |
36 static inline int DetermineChannels(AVFrame* frame) { | 36 static inline int DetermineChannels(AVFrame* frame) { |
37 #if defined(CHROMIUM_NO_AVFRAME_CHANNELS) | 37 #if defined(CHROMIUM_NO_AVFRAME_CHANNELS) |
38 // When use_system_ffmpeg==1, libav's AVFrame doesn't have channels field. | 38 // When use_system_ffmpeg==1, libav's AVFrame doesn't have channels field. |
39 return av_get_channel_layout_nb_channels(frame->channel_layout); | 39 return av_get_channel_layout_nb_channels(frame->channel_layout); |
40 #else | 40 #else |
41 return frame->channels; | 41 return frame->channels; |
42 #endif | 42 #endif |
43 } | 43 } |
44 | 44 |
| 45 // Called by FFmpeg's allocation routine to allocate a buffer. Uses |
| 46 // AVCodecContext.opaque to get the object reference in order to call |
| 47 // GetAudioBuffer() to do the actual allocation. |
| 48 static int GetAudioBufferImpl(struct AVCodecContext* s, |
| 49 AVFrame* frame, |
| 50 int flags) { |
| 51 FFmpegAudioDecoder* decoder = static_cast<FFmpegAudioDecoder*>(s->opaque); |
| 52 return decoder->GetAudioBuffer(s, frame, flags); |
| 53 } |
| 54 |
45 // Called by FFmpeg's allocation routine to free a buffer. |opaque| is the | 55 // Called by FFmpeg's allocation routine to free a buffer. |opaque| is the |
46 // AudioBuffer allocated, so unref it. | 56 // AudioBuffer allocated, so unref it. |
47 static void ReleaseAudioBufferImpl(void* opaque, uint8_t* data) { | 57 static void ReleaseAudioBufferImpl(void* opaque, uint8_t* data) { |
48 if (opaque) | 58 if (opaque) |
49 static_cast<AudioBuffer*>(opaque)->Release(); | 59 static_cast<AudioBuffer*>(opaque)->Release(); |
50 } | 60 } |
51 | 61 |
52 // Called by FFmpeg's allocation routine to allocate a buffer. Uses | |
53 // AVCodecContext.opaque to get the object reference in order to call | |
54 // GetAudioBuffer() to do the actual allocation. | |
55 static int GetAudioBuffer(struct AVCodecContext* s, AVFrame* frame, int flags) { | |
56 DCHECK(s->codec->capabilities & CODEC_CAP_DR1); | |
57 DCHECK_EQ(s->codec_type, AVMEDIA_TYPE_AUDIO); | |
58 | |
59 // Since this routine is called by FFmpeg when a buffer is required for audio | |
60 // data, use the values supplied by FFmpeg (ignoring the current settings). | |
61 // FFmpegDecode() gets to determine if the buffer is usable or not. | |
62 AVSampleFormat format = static_cast<AVSampleFormat>(frame->format); | |
63 SampleFormat sample_format = | |
64 AVSampleFormatToSampleFormat(format, s->codec_id); | |
65 int channels = DetermineChannels(frame); | |
66 if (channels <= 0 || channels >= limits::kMaxChannels) { | |
67 DLOG(ERROR) << "Requested number of channels (" << channels | |
68 << ") exceeds limit."; | |
69 return AVERROR(EINVAL); | |
70 } | |
71 | |
72 int bytes_per_channel = SampleFormatToBytesPerChannel(sample_format); | |
73 if (frame->nb_samples <= 0) | |
74 return AVERROR(EINVAL); | |
75 | |
76 if (s->channels != channels) { | |
77 DLOG(ERROR) << "AVCodecContext and AVFrame disagree on channel count."; | |
78 return AVERROR(EINVAL); | |
79 } | |
80 | |
81 if (s->sample_rate != frame->sample_rate) { | |
82 DLOG(ERROR) << "AVCodecContext and AVFrame disagree on sample rate. " | |
83 << s->sample_rate << " vs " << frame->sample_rate; | |
84 return AVERROR(EINVAL); | |
85 } | |
86 | |
87 // Determine how big the buffer should be and allocate it. FFmpeg may adjust | |
88 // how big each channel data is in order to meet the alignment policy, so | |
89 // we need to take this into consideration. | |
90 int buffer_size_in_bytes = av_samples_get_buffer_size( | |
91 &frame->linesize[0], channels, frame->nb_samples, format, | |
92 0 /* align, use ffmpeg default */); | |
93 // Check for errors from av_samples_get_buffer_size(). | |
94 if (buffer_size_in_bytes < 0) | |
95 return buffer_size_in_bytes; | |
96 int frames_required = buffer_size_in_bytes / bytes_per_channel / channels; | |
97 DCHECK_GE(frames_required, frame->nb_samples); | |
98 | |
99 ChannelLayout channel_layout = | |
100 ChannelLayoutToChromeChannelLayout(s->channel_layout, s->channels); | |
101 | |
102 if (channel_layout == CHANNEL_LAYOUT_UNSUPPORTED) { | |
103 DLOG(ERROR) << "Unsupported channel layout."; | |
104 return AVERROR(EINVAL); | |
105 } | |
106 | |
107 scoped_refptr<AudioBuffer> buffer = AudioBuffer::CreateBuffer( | |
108 sample_format, channel_layout, channels, s->sample_rate, frames_required); | |
109 | |
110 // Initialize the data[] and extended_data[] fields to point into the memory | |
111 // allocated for AudioBuffer. |number_of_planes| will be 1 for interleaved | |
112 // audio and equal to |channels| for planar audio. | |
113 int number_of_planes = buffer->channel_data().size(); | |
114 if (number_of_planes <= AV_NUM_DATA_POINTERS) { | |
115 DCHECK_EQ(frame->extended_data, frame->data); | |
116 for (int i = 0; i < number_of_planes; ++i) | |
117 frame->data[i] = buffer->channel_data()[i]; | |
118 } else { | |
119 // There are more channels than can fit into data[], so allocate | |
120 // extended_data[] and fill appropriately. | |
121 frame->extended_data = static_cast<uint8_t**>( | |
122 av_malloc(number_of_planes * sizeof(*frame->extended_data))); | |
123 int i = 0; | |
124 for (; i < AV_NUM_DATA_POINTERS; ++i) | |
125 frame->extended_data[i] = frame->data[i] = buffer->channel_data()[i]; | |
126 for (; i < number_of_planes; ++i) | |
127 frame->extended_data[i] = buffer->channel_data()[i]; | |
128 } | |
129 | |
130 // Now create an AVBufferRef for the data just allocated. It will own the | |
131 // reference to the AudioBuffer object. | |
132 AudioBuffer* opaque = buffer.get(); | |
133 opaque->AddRef(); | |
134 frame->buf[0] = av_buffer_create( | |
135 frame->data[0], buffer_size_in_bytes, ReleaseAudioBufferImpl, opaque, 0); | |
136 return 0; | |
137 } | |
138 | |
139 FFmpegAudioDecoder::FFmpegAudioDecoder( | 62 FFmpegAudioDecoder::FFmpegAudioDecoder( |
140 const scoped_refptr<base::SingleThreadTaskRunner>& task_runner, | 63 const scoped_refptr<base::SingleThreadTaskRunner>& task_runner, |
141 const scoped_refptr<MediaLog>& media_log) | 64 const scoped_refptr<MediaLog>& media_log) |
142 : task_runner_(task_runner), | 65 : task_runner_(task_runner), |
143 state_(kUninitialized), | 66 state_(kUninitialized), |
144 av_sample_format_(0), | 67 av_sample_format_(0), |
145 media_log_(media_log) { | 68 media_log_(media_log), |
146 } | 69 pool_(new AudioBufferMemoryPool()) {} |
147 | 70 |
148 FFmpegAudioDecoder::~FFmpegAudioDecoder() { | 71 FFmpegAudioDecoder::~FFmpegAudioDecoder() { |
149 DCHECK(task_runner_->BelongsToCurrentThread()); | 72 DCHECK(task_runner_->BelongsToCurrentThread()); |
150 | 73 |
151 if (state_ != kUninitialized) | 74 if (state_ != kUninitialized) |
152 ReleaseFFmpegResources(); | 75 ReleaseFFmpegResources(); |
153 } | 76 } |
154 | 77 |
155 std::string FFmpegAudioDecoder::GetDisplayName() const { | 78 std::string FFmpegAudioDecoder::GetDisplayName() const { |
156 return "FFmpegAudioDecoder"; | 79 return "FFmpegAudioDecoder"; |
(...skipping 238 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
395 DCHECK(!config_.is_encrypted()); | 318 DCHECK(!config_.is_encrypted()); |
396 | 319 |
397 // Release existing decoder resources if necessary. | 320 // Release existing decoder resources if necessary. |
398 ReleaseFFmpegResources(); | 321 ReleaseFFmpegResources(); |
399 | 322 |
400 // Initialize AVCodecContext structure. | 323 // Initialize AVCodecContext structure. |
401 codec_context_.reset(avcodec_alloc_context3(NULL)); | 324 codec_context_.reset(avcodec_alloc_context3(NULL)); |
402 AudioDecoderConfigToAVCodecContext(config_, codec_context_.get()); | 325 AudioDecoderConfigToAVCodecContext(config_, codec_context_.get()); |
403 | 326 |
404 codec_context_->opaque = this; | 327 codec_context_->opaque = this; |
405 codec_context_->get_buffer2 = GetAudioBuffer; | 328 codec_context_->get_buffer2 = GetAudioBufferImpl; |
406 codec_context_->refcounted_frames = 1; | 329 codec_context_->refcounted_frames = 1; |
407 | 330 |
408 if (config_.codec() == kCodecOpus) | 331 if (config_.codec() == kCodecOpus) |
409 codec_context_->request_sample_fmt = AV_SAMPLE_FMT_FLT; | 332 codec_context_->request_sample_fmt = AV_SAMPLE_FMT_FLT; |
410 | 333 |
411 AVCodec* codec = avcodec_find_decoder(codec_context_->codec_id); | 334 AVCodec* codec = avcodec_find_decoder(codec_context_->codec_id); |
412 if (!codec || avcodec_open2(codec_context_.get(), codec, NULL) < 0) { | 335 if (!codec || avcodec_open2(codec_context_.get(), codec, NULL) < 0) { |
413 DLOG(ERROR) << "Could not initialize audio decoder: " | 336 DLOG(ERROR) << "Could not initialize audio decoder: " |
414 << codec_context_->codec_id; | 337 << codec_context_->codec_id; |
415 ReleaseFFmpegResources(); | 338 ReleaseFFmpegResources(); |
(...skipping 23 matching lines...) Expand all Loading... |
439 void FFmpegAudioDecoder::ResetTimestampState() { | 362 void FFmpegAudioDecoder::ResetTimestampState() { |
440 // Opus codec delay is handled by ffmpeg. | 363 // Opus codec delay is handled by ffmpeg. |
441 const int codec_delay = | 364 const int codec_delay = |
442 config_.codec() == kCodecOpus ? 0 : config_.codec_delay(); | 365 config_.codec() == kCodecOpus ? 0 : config_.codec_delay(); |
443 discard_helper_.reset( | 366 discard_helper_.reset( |
444 new AudioDiscardHelper(config_.samples_per_second(), codec_delay, | 367 new AudioDiscardHelper(config_.samples_per_second(), codec_delay, |
445 config_.codec() == kCodecVorbis)); | 368 config_.codec() == kCodecVorbis)); |
446 discard_helper_->Reset(codec_delay); | 369 discard_helper_->Reset(codec_delay); |
447 } | 370 } |
448 | 371 |
| 372 int FFmpegAudioDecoder::GetAudioBuffer(struct AVCodecContext* s, |
| 373 AVFrame* frame, |
| 374 int flags) { |
| 375 DCHECK(s->codec->capabilities & CODEC_CAP_DR1); |
| 376 DCHECK_EQ(s->codec_type, AVMEDIA_TYPE_AUDIO); |
| 377 |
| 378 // Since this routine is called by FFmpeg when a buffer is required for audio |
| 379 // data, use the values supplied by FFmpeg (ignoring the current settings). |
| 380 // FFmpegDecode() gets to determine if the buffer is usable or not. |
| 381 AVSampleFormat format = static_cast<AVSampleFormat>(frame->format); |
| 382 SampleFormat sample_format = |
| 383 AVSampleFormatToSampleFormat(format, s->codec_id); |
| 384 int channels = DetermineChannels(frame); |
| 385 if (channels <= 0 || channels >= limits::kMaxChannels) { |
| 386 DLOG(ERROR) << "Requested number of channels (" << channels |
| 387 << ") exceeds limit."; |
| 388 return AVERROR(EINVAL); |
| 389 } |
| 390 |
| 391 int bytes_per_channel = SampleFormatToBytesPerChannel(sample_format); |
| 392 if (frame->nb_samples <= 0) |
| 393 return AVERROR(EINVAL); |
| 394 |
| 395 if (s->channels != channels) { |
| 396 DLOG(ERROR) << "AVCodecContext and AVFrame disagree on channel count."; |
| 397 return AVERROR(EINVAL); |
| 398 } |
| 399 |
| 400 if (s->sample_rate != frame->sample_rate) { |
| 401 DLOG(ERROR) << "AVCodecContext and AVFrame disagree on sample rate. " |
| 402 << s->sample_rate << " vs " << frame->sample_rate; |
| 403 return AVERROR(EINVAL); |
| 404 } |
| 405 |
| 406 // Determine how big the buffer should be and allocate it. FFmpeg may adjust |
| 407 // how big each channel data is in order to meet the alignment policy, so |
| 408 // we need to take this into consideration. |
| 409 int buffer_size_in_bytes = av_samples_get_buffer_size( |
| 410 &frame->linesize[0], channels, frame->nb_samples, format, |
| 411 0 /* align, use ffmpeg default */); |
| 412 // Check for errors from av_samples_get_buffer_size(). |
| 413 if (buffer_size_in_bytes < 0) |
| 414 return buffer_size_in_bytes; |
| 415 int frames_required = buffer_size_in_bytes / bytes_per_channel / channels; |
| 416 DCHECK_GE(frames_required, frame->nb_samples); |
| 417 |
| 418 ChannelLayout channel_layout = |
| 419 ChannelLayoutToChromeChannelLayout(s->channel_layout, s->channels); |
| 420 |
| 421 if (channel_layout == CHANNEL_LAYOUT_UNSUPPORTED) { |
| 422 DLOG(ERROR) << "Unsupported channel layout."; |
| 423 return AVERROR(EINVAL); |
| 424 } |
| 425 |
| 426 scoped_refptr<AudioBuffer> buffer = |
| 427 AudioBuffer::CreateBuffer(sample_format, channel_layout, channels, |
| 428 s->sample_rate, frames_required, pool_); |
| 429 |
| 430 // Initialize the data[] and extended_data[] fields to point into the memory |
| 431 // allocated for AudioBuffer. |number_of_planes| will be 1 for interleaved |
| 432 // audio and equal to |channels| for planar audio. |
| 433 int number_of_planes = buffer->channel_data().size(); |
| 434 if (number_of_planes <= AV_NUM_DATA_POINTERS) { |
| 435 DCHECK_EQ(frame->extended_data, frame->data); |
| 436 for (int i = 0; i < number_of_planes; ++i) |
| 437 frame->data[i] = buffer->channel_data()[i]; |
| 438 } else { |
| 439 // There are more channels than can fit into data[], so allocate |
| 440 // extended_data[] and fill appropriately. |
| 441 frame->extended_data = static_cast<uint8_t**>( |
| 442 av_malloc(number_of_planes * sizeof(*frame->extended_data))); |
| 443 int i = 0; |
| 444 for (; i < AV_NUM_DATA_POINTERS; ++i) |
| 445 frame->extended_data[i] = frame->data[i] = buffer->channel_data()[i]; |
| 446 for (; i < number_of_planes; ++i) |
| 447 frame->extended_data[i] = buffer->channel_data()[i]; |
| 448 } |
| 449 |
| 450 // Now create an AVBufferRef for the data just allocated. It will own the |
| 451 // reference to the AudioBuffer object. |
| 452 AudioBuffer* opaque = buffer.get(); |
| 453 opaque->AddRef(); |
| 454 frame->buf[0] = av_buffer_create(frame->data[0], buffer_size_in_bytes, |
| 455 ReleaseAudioBufferImpl, opaque, 0); |
| 456 return 0; |
| 457 } |
| 458 |
449 } // namespace media | 459 } // namespace media |
OLD | NEW |