Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "media/filters/ffmpeg_audio_decoder.h" | 5 #include "media/filters/ffmpeg_audio_decoder.h" |
| 6 | 6 |
| 7 #include <stdint.h> | 7 #include <stdint.h> |
| 8 | 8 |
| 9 #include "base/callback_helpers.h" | 9 #include "base/callback_helpers.h" |
| 10 #include "base/single_thread_task_runner.h" | 10 #include "base/single_thread_task_runner.h" |
| (...skipping 66 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 77 DLOG(ERROR) << "AVCodecContext and AVFrame disagree on channel count."; | 77 DLOG(ERROR) << "AVCodecContext and AVFrame disagree on channel count."; |
| 78 return AVERROR(EINVAL); | 78 return AVERROR(EINVAL); |
| 79 } | 79 } |
| 80 | 80 |
| 81 if (s->sample_rate != frame->sample_rate) { | 81 if (s->sample_rate != frame->sample_rate) { |
| 82 DLOG(ERROR) << "AVCodecContext and AVFrame disagree on sample rate." | 82 DLOG(ERROR) << "AVCodecContext and AVFrame disagree on sample rate." |
| 83 << s->sample_rate << " vs " << frame->sample_rate; | 83 << s->sample_rate << " vs " << frame->sample_rate; |
| 84 return AVERROR(EINVAL); | 84 return AVERROR(EINVAL); |
| 85 } | 85 } |
| 86 | 86 |
| 87 // Use CHANNEL_LAYOUT_DISCRETE for Opus Ambisonics signals | |
| 88 int is_opus_ambisonics = 0; | |
|
DaleCurtis
2017/03/20 17:38:58
bool.
flim-chromium
2017/03/22 06:20:51
Done.
| |
| 89 if (s->codec_id == AV_CODEC_ID_OPUS && s->extradata_size >= 19) { | |
| 90 int mapping_family = s->extradata[18]; | |
| 91 is_opus_ambisonics = mapping_family == 2; | |
| 92 } | |
| 93 ChannelLayout channel_layout = is_opus_ambisonics | |
|
DaleCurtis
2017/03/20 17:38:58
&& channels > 8.
flim-chromium
2017/03/22 06:20:51
Ambisonics signals could also be < 8 channels. Or […comment truncated in capture]
DaleCurtis
2017/03/22 18:56:30
I mean I don't think DISCRETE will be passed or wo[…comment truncated in capture]
flim-chromium
2017/03/29 03:15:25
Thanks for the explanation. I've added '&& channels > 8'.
| |
| 94 ? CHANNEL_LAYOUT_DISCRETE | |
| 95 : ChannelLayoutToChromeChannelLayout( | |
| 96 s->channel_layout, | |
| 97 s->channels); | |
| 98 | |
| 87 // Determine how big the buffer should be and allocate it. FFmpeg may adjust | 99 // Determine how big the buffer should be and allocate it. FFmpeg may adjust |
| 88 // how big each channel data is in order to meet the alignment policy, so | 100 // how big each channel data is in order to meet the alignment policy, so |
| 89 // we need to take this into consideration. | 101 // we need to take this into consideration. |
| 90 int buffer_size_in_bytes = av_samples_get_buffer_size( | 102 int buffer_size_in_bytes = av_samples_get_buffer_size( |
| 91 &frame->linesize[0], channels, frame->nb_samples, format, | 103 &frame->linesize[0], channels, frame->nb_samples, format, |
| 92 0 /* align, use ffmpeg default */); | 104 0 /* align, use ffmpeg default */); |
| 93 // Check for errors from av_samples_get_buffer_size(). | 105 // Check for errors from av_samples_get_buffer_size(). |
| 94 if (buffer_size_in_bytes < 0) | 106 if (buffer_size_in_bytes < 0) |
| 95 return buffer_size_in_bytes; | 107 return buffer_size_in_bytes; |
| 96 int frames_required = buffer_size_in_bytes / bytes_per_channel / channels; | 108 int frames_required = buffer_size_in_bytes / bytes_per_channel / channels; |
| 97 DCHECK_GE(frames_required, frame->nb_samples); | 109 DCHECK_GE(frames_required, frame->nb_samples); |
| 98 scoped_refptr<AudioBuffer> buffer = AudioBuffer::CreateBuffer( | 110 scoped_refptr<AudioBuffer> buffer = AudioBuffer::CreateBuffer( |
| 99 sample_format, | 111 sample_format, |
| 100 ChannelLayoutToChromeChannelLayout(s->channel_layout, s->channels), | 112 channel_layout, |
| 101 channels, | 113 channels, |
| 102 s->sample_rate, | 114 s->sample_rate, |
| 103 frames_required); | 115 frames_required); |
| 104 | 116 |
| 105 // Initialize the data[] and extended_data[] fields to point into the memory | 117 // Initialize the data[] and extended_data[] fields to point into the memory |
| 106 // allocated for AudioBuffer. |number_of_planes| will be 1 for interleaved | 118 // allocated for AudioBuffer. |number_of_planes| will be 1 for interleaved |
| 107 // audio and equal to |channels| for planar audio. | 119 // audio and equal to |channels| for planar audio. |
| 108 int number_of_planes = buffer->channel_data().size(); | 120 int number_of_planes = buffer->channel_data().size(); |
| 109 if (number_of_planes <= AV_NUM_DATA_POINTERS) { | 121 if (number_of_planes <= AV_NUM_DATA_POINTERS) { |
| 110 DCHECK_EQ(frame->extended_data, frame->data); | 122 DCHECK_EQ(frame->extended_data, frame->data); |
| (...skipping 183 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 294 bool config_changed = false; | 306 bool config_changed = false; |
| 295 if (frame_decoded) { | 307 if (frame_decoded) { |
| 296 const int channels = DetermineChannels(av_frame_.get()); | 308 const int channels = DetermineChannels(av_frame_.get()); |
| 297 ChannelLayout channel_layout = ChannelLayoutToChromeChannelLayout( | 309 ChannelLayout channel_layout = ChannelLayoutToChromeChannelLayout( |
| 298 codec_context_->channel_layout, codec_context_->channels); | 310 codec_context_->channel_layout, codec_context_->channels); |
| 299 | 311 |
| 300 bool is_sample_rate_change = | 312 bool is_sample_rate_change = |
| 301 av_frame_->sample_rate != config_.samples_per_second(); | 313 av_frame_->sample_rate != config_.samples_per_second(); |
| 302 bool is_config_stale = | 314 bool is_config_stale = |
| 303 is_sample_rate_change || | 315 is_sample_rate_change || |
| 304 channels != ChannelLayoutToChannelCount(config_.channel_layout()) || | 316 channels != config_.channels() || |
| 305 av_frame_->format != av_sample_format_; | 317 av_frame_->format != av_sample_format_; |
| 306 | 318 |
| 307 // Only consider channel layout changes for AAC. | 319 // Only consider channel layout changes for AAC. |
| 308 // TODO(tguilbert, dalecurtis): Due to http://crbug.com/600538 we need to | 320 // TODO(tguilbert, dalecurtis): Due to http://crbug.com/600538 we need to |
| 309 // allow channel layout changes for the moment. See if ffmpeg is fixable. | 321 // allow channel layout changes for the moment. See if ffmpeg is fixable. |
| 310 if (config_.codec() == kCodecAAC) | 322 if (config_.codec() == kCodecAAC) |
| 311 is_config_stale |= channel_layout != config_.channel_layout(); | 323 is_config_stale |= channel_layout != config_.channel_layout(); |
| 312 | 324 |
| 313 if (is_config_stale) { | 325 if (is_config_stale) { |
| 314 // Only allow midstream configuration changes for AAC. Sample format is | 326 // Only allow midstream configuration changes for AAC. Sample format is |
| 315 // not expected to change between AAC profiles. | 327 // not expected to change between AAC profiles. |
| 316 if (config_.codec() == kCodecAAC && | 328 if (config_.codec() == kCodecAAC && |
| 317 av_frame_->format == av_sample_format_) { | 329 av_frame_->format == av_sample_format_) { |
| 318 MEDIA_LOG(DEBUG, media_log_) | 330 MEDIA_LOG(DEBUG, media_log_) |
| 319 << " Detected AAC midstream configuration change" | 331 << " Detected AAC midstream configuration change" |
| 320 << " PTS:" << buffer->timestamp().InMicroseconds() | 332 << " PTS:" << buffer->timestamp().InMicroseconds() |
| 321 << " Sample Rate: " << av_frame_->sample_rate << " vs " | 333 << " Sample Rate: " << av_frame_->sample_rate << " vs " |
| 322 << config_.samples_per_second() | 334 << config_.samples_per_second() |
| 323 << ", ChannelLayout: " << channel_layout << " vs " | 335 << ", ChannelLayout: " << channel_layout << " vs " |
| 324 << config_.channel_layout() << ", Channels: " << channels | 336 << config_.channel_layout() << ", Channels: " << channels |
| 325 << " vs " | 337 << " vs " |
| 326 << ChannelLayoutToChannelCount(config_.channel_layout()); | 338 << config_.channels(); |
| 327 config_.Initialize(config_.codec(), config_.sample_format(), | 339 config_.Initialize(config_.codec(), config_.sample_format(), |
| 328 channel_layout, av_frame_->sample_rate, | 340 channel_layout, av_frame_->sample_rate, |
| 329 config_.extra_data(), config_.encryption_scheme(), | 341 config_.extra_data(), config_.encryption_scheme(), |
| 330 config_.seek_preroll(), config_.codec_delay()); | 342 config_.seek_preroll(), config_.codec_delay()); |
| 331 config_changed = true; | 343 config_changed = true; |
| 332 if (is_sample_rate_change) | 344 if (is_sample_rate_change) |
| 333 ResetTimestampState(); | 345 ResetTimestampState(); |
| 334 } else { | 346 } else { |
| 335 MEDIA_LOG(ERROR, media_log_) | 347 MEDIA_LOG(ERROR, media_log_) |
| 336 << "Unsupported midstream configuration change!" | 348 << "Unsupported midstream configuration change!" |
| 337 << " Sample Rate: " << av_frame_->sample_rate << " vs " | 349 << " Sample Rate: " << av_frame_->sample_rate << " vs " |
| 338 << config_.samples_per_second() << ", Channels: " << channels | 350 << config_.samples_per_second() << ", Channels: " << channels |
| 339 << " vs " << ChannelLayoutToChannelCount(config_.channel_layout()) | 351 << " vs " << config_.channels() |
| 340 << ", Sample Format: " << av_frame_->format << " vs " | 352 << ", Sample Format: " << av_frame_->format << " vs " |
| 341 << av_sample_format_; | 353 << av_sample_format_; |
| 342 // This is an unrecoverable error, so bail out. | 354 // This is an unrecoverable error, so bail out. |
| 343 av_frame_unref(av_frame_.get()); | 355 av_frame_unref(av_frame_.get()); |
| 344 return false; | 356 return false; |
| 345 } | 357 } |
| 346 } | 358 } |
| 347 | 359 |
| 348 // Get the AudioBuffer that the data was decoded into. Adjust the number | 360 // Get the AudioBuffer that the data was decoded into. Adjust the number |
| 349 // of frames, in case fewer than requested were actually decoded. | 361 // of frames, in case fewer than requested were actually decoded. |
| 350 output = reinterpret_cast<AudioBuffer*>( | 362 output = reinterpret_cast<AudioBuffer*>( |
| 351 av_buffer_get_opaque(av_frame_->buf[0])); | 363 av_buffer_get_opaque(av_frame_->buf[0])); |
| 352 | 364 |
| 353 DCHECK_EQ(ChannelLayoutToChannelCount(config_.channel_layout()), | 365 DCHECK_EQ(config_.channels(), output->channel_count()); |
| 354 output->channel_count()); | |
| 355 const int unread_frames = output->frame_count() - av_frame_->nb_samples; | 366 const int unread_frames = output->frame_count() - av_frame_->nb_samples; |
| 356 DCHECK_GE(unread_frames, 0); | 367 DCHECK_GE(unread_frames, 0); |
| 357 if (unread_frames > 0) | 368 if (unread_frames > 0) |
| 358 output->TrimEnd(unread_frames); | 369 output->TrimEnd(unread_frames); |
| 359 av_frame_unref(av_frame_.get()); | 370 av_frame_unref(av_frame_.get()); |
| 360 } | 371 } |
| 361 | 372 |
| 362 // WARNING: |av_frame_| no longer has valid data at this point. | 373 // WARNING: |av_frame_| no longer has valid data at this point. |
| 363 const int decoded_frames = frame_decoded ? output->frame_count() : 0; | 374 const int decoded_frames = frame_decoded ? output->frame_count() : 0; |
| 364 if (IsEndOfStream(result, decoded_frames, buffer)) { | 375 if (IsEndOfStream(result, decoded_frames, buffer)) { |
| (...skipping 44 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 409 << codec_context_->codec_id; | 420 << codec_context_->codec_id; |
| 410 ReleaseFFmpegResources(); | 421 ReleaseFFmpegResources(); |
| 411 state_ = kUninitialized; | 422 state_ = kUninitialized; |
| 412 return false; | 423 return false; |
| 413 } | 424 } |
| 414 | 425 |
| 415 // Success! | 426 // Success! |
| 416 av_frame_.reset(av_frame_alloc()); | 427 av_frame_.reset(av_frame_alloc()); |
| 417 av_sample_format_ = codec_context_->sample_fmt; | 428 av_sample_format_ = codec_context_->sample_fmt; |
| 418 | 429 |
| 419 if (codec_context_->channels != | 430 |
| 420 ChannelLayoutToChannelCount(config_.channel_layout())) { | 431 if (codec_context_->channels != config_.channels()) { |
| 421 DLOG(ERROR) << "Audio configuration specified " | 432 DLOG(ERROR) << "Audio configuration specified " |
| 422 << ChannelLayoutToChannelCount(config_.channel_layout()) | 433 << config_.channels() |
| 423 << " channels, but FFmpeg thinks the file contains " | 434 << " channels, but FFmpeg thinks the file contains " |
| 424 << codec_context_->channels << " channels"; | 435 << codec_context_->channels << " channels"; |
| 425 ReleaseFFmpegResources(); | 436 ReleaseFFmpegResources(); |
| 426 state_ = kUninitialized; | 437 state_ = kUninitialized; |
| 427 return false; | 438 return false; |
| 428 } | 439 } |
| 429 | 440 |
| 430 ResetTimestampState(); | 441 ResetTimestampState(); |
| 431 return true; | 442 return true; |
| 432 } | 443 } |
| 433 | 444 |
| 434 void FFmpegAudioDecoder::ResetTimestampState() { | 445 void FFmpegAudioDecoder::ResetTimestampState() { |
| 435 // Opus codec delay is handled by ffmpeg. | 446 // Opus codec delay is handled by ffmpeg. |
| 436 const int codec_delay = | 447 const int codec_delay = |
| 437 config_.codec() == kCodecOpus ? 0 : config_.codec_delay(); | 448 config_.codec() == kCodecOpus ? 0 : config_.codec_delay(); |
| 438 discard_helper_.reset( | 449 discard_helper_.reset( |
| 439 new AudioDiscardHelper(config_.samples_per_second(), codec_delay, | 450 new AudioDiscardHelper(config_.samples_per_second(), codec_delay, |
| 440 config_.codec() == kCodecVorbis)); | 451 config_.codec() == kCodecVorbis)); |
| 441 discard_helper_->Reset(codec_delay); | 452 discard_helper_->Reset(codec_delay); |
| 442 } | 453 } |
| 443 | 454 |
| 444 } // namespace media | 455 } // namespace media |
| OLD | NEW |