| OLD | NEW |
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "media/filters/ffmpeg_audio_decoder.h" | 5 #include "media/filters/ffmpeg_audio_decoder.h" |
| 6 | 6 |
| 7 #include "base/callback_helpers.h" | 7 #include "base/callback_helpers.h" |
| 8 #include "base/single_thread_task_runner.h" | 8 #include "base/single_thread_task_runner.h" |
| 9 #include "media/base/audio_buffer.h" | 9 #include "media/base/audio_buffer.h" |
| 10 #include "media/base/audio_bus.h" | 10 #include "media/base/audio_bus.h" |
| (...skipping 24 matching lines...) | (...skipping 24 matching lines...) |
| 35 #if defined(CHROMIUM_NO_AVFRAME_CHANNELS) | 35 #if defined(CHROMIUM_NO_AVFRAME_CHANNELS) |
| 36 // When use_system_ffmpeg==1, libav's AVFrame doesn't have channels field. | 36 // When use_system_ffmpeg==1, libav's AVFrame doesn't have channels field. |
| 37 return av_get_channel_layout_nb_channels(frame->channel_layout); | 37 return av_get_channel_layout_nb_channels(frame->channel_layout); |
| 38 #else | 38 #else |
| 39 return frame->channels; | 39 return frame->channels; |
| 40 #endif | 40 #endif |
| 41 } | 41 } |
| 42 | 42 |
| 43 // Called by FFmpeg's allocation routine to free a buffer. |opaque| is the | 43 // Called by FFmpeg's allocation routine to free a buffer. |opaque| is the |
| 44 // AudioBuffer allocated, so unref it. | 44 // AudioBuffer allocated, so unref it. |
| 45 static void ReleaseAudioBufferImpl(void* opaque, uint8* data) { | 45 static void ReleaseAudioBufferImpl(void* opaque, uint8_t* data) { |
| 46 scoped_refptr<AudioBuffer> buffer; | 46 scoped_refptr<AudioBuffer> buffer; |
| 47 buffer.swap(reinterpret_cast<AudioBuffer**>(&opaque)); | 47 buffer.swap(reinterpret_cast<AudioBuffer**>(&opaque)); |
| 48 } | 48 } |
| 49 | 49 |
| 50 // Called by FFmpeg's allocation routine to allocate a buffer. Uses | 50 // Called by FFmpeg's allocation routine to allocate a buffer. Uses |
| 51 // AVCodecContext.opaque to get the object reference in order to call | 51 // AVCodecContext.opaque to get the object reference in order to call |
| 52 // GetAudioBuffer() to do the actual allocation. | 52 // GetAudioBuffer() to do the actual allocation. |
| 53 static int GetAudioBuffer(struct AVCodecContext* s, AVFrame* frame, int flags) { | 53 static int GetAudioBuffer(struct AVCodecContext* s, AVFrame* frame, int flags) { |
| 54 DCHECK(s->codec->capabilities & CODEC_CAP_DR1); | 54 DCHECK(s->codec->capabilities & CODEC_CAP_DR1); |
| 55 DCHECK_EQ(s->codec_type, AVMEDIA_TYPE_AUDIO); | 55 DCHECK_EQ(s->codec_type, AVMEDIA_TYPE_AUDIO); |
| (...skipping 45 matching lines...) | (...skipping 45 matching lines...) |
| 101 // allocated for AudioBuffer. |number_of_planes| will be 1 for interleaved | 101 // allocated for AudioBuffer. |number_of_planes| will be 1 for interleaved |
| 102 // audio and equal to |channels| for planar audio. | 102 // audio and equal to |channels| for planar audio. |
| 103 int number_of_planes = buffer->channel_data().size(); | 103 int number_of_planes = buffer->channel_data().size(); |
| 104 if (number_of_planes <= AV_NUM_DATA_POINTERS) { | 104 if (number_of_planes <= AV_NUM_DATA_POINTERS) { |
| 105 DCHECK_EQ(frame->extended_data, frame->data); | 105 DCHECK_EQ(frame->extended_data, frame->data); |
| 106 for (int i = 0; i < number_of_planes; ++i) | 106 for (int i = 0; i < number_of_planes; ++i) |
| 107 frame->data[i] = buffer->channel_data()[i]; | 107 frame->data[i] = buffer->channel_data()[i]; |
| 108 } else { | 108 } else { |
| 109 // There are more channels than can fit into data[], so allocate | 109 // There are more channels than can fit into data[], so allocate |
| 110 // extended_data[] and fill appropriately. | 110 // extended_data[] and fill appropriately. |
| 111 frame->extended_data = static_cast<uint8**>( | 111 frame->extended_data = static_cast<uint8_t**>( |
| 112 av_malloc(number_of_planes * sizeof(*frame->extended_data))); | 112 av_malloc(number_of_planes * sizeof(*frame->extended_data))); |
| 113 int i = 0; | 113 int i = 0; |
| 114 for (; i < AV_NUM_DATA_POINTERS; ++i) | 114 for (; i < AV_NUM_DATA_POINTERS; ++i) |
| 115 frame->extended_data[i] = frame->data[i] = buffer->channel_data()[i]; | 115 frame->extended_data[i] = frame->data[i] = buffer->channel_data()[i]; |
| 116 for (; i < number_of_planes; ++i) | 116 for (; i < number_of_planes; ++i) |
| 117 frame->extended_data[i] = buffer->channel_data()[i]; | 117 frame->extended_data[i] = buffer->channel_data()[i]; |
| 118 } | 118 } |
| 119 | 119 |
| 120 // Now create an AVBufferRef for the data just allocated. It will own the | 120 // Now create an AVBufferRef for the data just allocated. It will own the |
| 121 // reference to the AudioBuffer object. | 121 // reference to the AudioBuffer object. |
| (...skipping 122 matching lines...) | (...skipping 122 matching lines...) |
| 244 const scoped_refptr<DecoderBuffer>& buffer, | 244 const scoped_refptr<DecoderBuffer>& buffer, |
| 245 bool* has_produced_frame) { | 245 bool* has_produced_frame) { |
| 246 DCHECK(!*has_produced_frame); | 246 DCHECK(!*has_produced_frame); |
| 247 | 247 |
| 248 AVPacket packet; | 248 AVPacket packet; |
| 249 av_init_packet(&packet); | 249 av_init_packet(&packet); |
| 250 if (buffer->end_of_stream()) { | 250 if (buffer->end_of_stream()) { |
| 251 packet.data = NULL; | 251 packet.data = NULL; |
| 252 packet.size = 0; | 252 packet.size = 0; |
| 253 } else { | 253 } else { |
| 254 packet.data = const_cast<uint8*>(buffer->data()); | 254 packet.data = const_cast<uint8_t*>(buffer->data()); |
| 255 packet.size = buffer->data_size(); | 255 packet.size = buffer->data_size(); |
| 256 } | 256 } |
| 257 | 257 |
| 258 // Each audio packet may contain several frames, so we must call the decoder | 258 // Each audio packet may contain several frames, so we must call the decoder |
| 259 // until we've exhausted the packet. Regardless of the packet size we always | 259 // until we've exhausted the packet. Regardless of the packet size we always |
| 260 // want to hand it to the decoder at least once, otherwise we would end up | 260 // want to hand it to the decoder at least once, otherwise we would end up |
| 261 // skipping end of stream packets since they have a size of zero. | 261 // skipping end of stream packets since they have a size of zero. |
| 262 do { | 262 do { |
| 263 int frame_decoded = 0; | 263 int frame_decoded = 0; |
| 264 const int result = avcodec_decode_audio4( | 264 const int result = avcodec_decode_audio4( |
| (...skipping 120 matching lines...) | (...skipping 120 matching lines...) |
| 385 return true; | 385 return true; |
| 386 } | 386 } |
| 387 | 387 |
| 388 void FFmpegAudioDecoder::ResetTimestampState() { | 388 void FFmpegAudioDecoder::ResetTimestampState() { |
| 389 discard_helper_.reset(new AudioDiscardHelper(config_.samples_per_second(), | 389 discard_helper_.reset(new AudioDiscardHelper(config_.samples_per_second(), |
| 390 config_.codec_delay())); | 390 config_.codec_delay())); |
| 391 discard_helper_->Reset(config_.codec_delay()); | 391 discard_helper_->Reset(config_.codec_delay()); |
| 392 } | 392 } |
| 393 | 393 |
| 394 } // namespace media | 394 } // namespace media |
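Reviewer note for context: the allocation path in this file works by installing a custom `get_buffer2` callback on the `AVCodecContext` and wrapping the memory in an `AVBufferRef` via `av_buffer_create()`, whose free callback (`ReleaseAudioBufferImpl` above) drops the reference once FFmpeg is done with the frame. The sketch below shows that wiring in isolation. It is not the Chromium code: it allocates with `av_malloc` instead of `AudioBuffer`, handles only interleaved formats (deferring planar ones to the default allocator), and all helper names are made up for illustration.

```cpp
// Minimal sketch of the get_buffer2 / av_buffer_create pattern, assuming an
// FFmpeg version contemporary with avcodec_decode_audio4().
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavutil/buffer.h>
#include <libavutil/mem.h>
#include <libavutil/samplefmt.h>
}

// Free callback invoked by FFmpeg when the last AVBufferRef is released.
static void ReleaseBufferSketch(void* /*opaque*/, uint8_t* data) {
  av_free(data);
}

// Custom allocator: one contiguous block for interleaved audio, wrapped in an
// AVBufferRef so ownership travels with the frame.
static int GetBufferSketch(AVCodecContext* s, AVFrame* frame, int flags) {
  if (s->codec_type != AVMEDIA_TYPE_AUDIO ||
      av_sample_fmt_is_planar(s->sample_fmt)) {
    // Planar layouts need one plane per channel; let FFmpeg handle them here.
    return avcodec_default_get_buffer2(s, frame, flags);
  }

  const int bytes_per_sample = av_get_bytes_per_sample(s->sample_fmt);
  const int size = frame->nb_samples * s->channels * bytes_per_sample;
  uint8_t* data = static_cast<uint8_t*>(av_malloc(size));
  if (!data)
    return AVERROR(ENOMEM);

  frame->data[0] = data;
  frame->linesize[0] = size;
  frame->extended_data = frame->data;  // Interleaved audio: a single plane.
  frame->buf[0] =
      av_buffer_create(data, size, ReleaseBufferSketch, nullptr /* opaque */, 0);
  if (!frame->buf[0]) {
    av_free(data);
    return AVERROR(ENOMEM);
  }
  return 0;
}

// Hook the allocator up, mirroring how the decoder stashes |this| in
// AVCodecContext.opaque so its GetAudioBuffer() can reach it.
static void InstallAllocatorSketch(AVCodecContext* context, void* owner) {
  context->opaque = owner;
  context->get_buffer2 = GetBufferSketch;
  context->refcounted_frames = 1;  // Decoded frames keep their AVBufferRef.
}
```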
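Similarly, the loop starting at line 258 follows the `avcodec_decode_audio4()` contract: one packet may contain several frames, so the packet is fed to the decoder until all of its bytes are consumed, and a zero-size end-of-stream packet is still submitted once so buffered frames get flushed. A stand-alone sketch of that contract (again illustrative, not the patch's code; `DecodePacketSketch` is a hypothetical helper):

```cpp
// Sketch of the packet-draining loop around avcodec_decode_audio4().
extern "C" {
#include <libavcodec/avcodec.h>
}

#include <cstdint>

// Returns false on a decode error. An empty packet (data == nullptr,
// size == 0) signals end of stream and flushes any buffered frames.
static bool DecodePacketSketch(AVCodecContext* context,
                               const uint8_t* data,
                               int size) {
  AVPacket packet;
  av_init_packet(&packet);
  packet.data = const_cast<uint8_t*>(data);
  packet.size = size;

  AVFrame* frame = av_frame_alloc();
  bool ok = true;

  // A packet can hold several frames; keep calling the decoder until it has
  // consumed every byte. Run at least once so zero-size EOS packets flush.
  do {
    int frame_decoded = 0;
    const int result =
        avcodec_decode_audio4(context, frame, &frame_decoded, &packet);
    if (result < 0) {
      ok = false;
      break;
    }
    packet.data += result;
    packet.size -= result;

    if (frame_decoded) {
      // Hand |frame| off here; with refcounted_frames == 1 the data stays
      // valid until av_frame_unref() drops the AVBufferRef.
      av_frame_unref(frame);
    }
  } while (packet.size > 0);

  av_frame_free(&frame);
  return ok;
}
```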