Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "media/filters/ffmpeg_audio_decoder.h" | 5 #include "media/filters/ffmpeg_audio_decoder.h" |
| 6 | 6 |
| 7 #include "base/bind.h" | |
| 7 #include "media/base/data_buffer.h" | 8 #include "media/base/data_buffer.h" |
| 9 #include "media/base/filter_host.h" | |
| 8 #include "media/base/limits.h" | 10 #include "media/base/limits.h" |
| 9 #include "media/ffmpeg/ffmpeg_common.h" | 11 #include "media/ffmpeg/ffmpeg_common.h" |
| 10 #include "media/filters/ffmpeg_demuxer.h" | |
| 11 | |
| 12 #if !defined(USE_SSE) | |
| 13 #if defined(__SSE__) || defined(ARCH_CPU_X86_64) || _M_IX86_FP==1 | |
| 14 #define USE_SSE 1 | |
| 15 #else | |
| 16 #define USE_SSE 0 | |
| 17 #endif | |
| 18 #endif | |
| 19 #if USE_SSE | |
| 20 #include <xmmintrin.h> | |
| 21 #endif | |
| 22 | 12 |
| 23 namespace media { | 13 namespace media { |
| 24 | 14 |
| 25 // Size of the decoded audio buffer. | 15 // Returns true if the decode result was an error. |
| 26 const size_t FFmpegAudioDecoder::kOutputBufferSize = | 16 static bool IsErrorResult(int result, int decoded_size) { |
| 27 AVCODEC_MAX_AUDIO_FRAME_SIZE; | 17 return result < 0 || |
| 18 decoded_size < 0 || | |
| 19 decoded_size > AVCODEC_MAX_AUDIO_FRAME_SIZE; | |
| 20 } | |
| 21 | |
| 22 // Returns true if the decode result produced audio samples. | |
| 23 static bool ProducedAudioSamples(int decoded_size) { | |
| 24 return decoded_size > 0; | |
| 25 } | |
| 26 | |
| 27 // Returns true if the decode result was a timestamp packet and not actual audio | |
| 28 // data. | |
| 29 static bool IsTimestampMarkerPacket(int result, Buffer* input) { | |
| 30 // We can get a positive result but no decoded data. This is ok because | |
| 31 // this can be a marker packet that only contains a timestamp. | |
| 32 return result > 0 && !input->IsEndOfStream() && | |
| 33 input->GetTimestamp() != kNoTimestamp && | |
| 34 input->GetDuration() != kNoTimestamp; | |
| 35 } | |
| 36 | |
| 37 // Returns true if the decode result was end of stream. | |
| 38 static bool IsEndOfStream(int result, int decoded_size, Buffer* input) { | |
| 39 // Three conditions to meet to declare end of stream for this decoder: | |
| 40 // 1. FFmpeg didn't read anything. | |
| 41 // 2. FFmpeg didn't output anything. | |
| 42 // 3. An end of stream buffer is received. | |
| 43 return result == 0 && decoded_size == 0 && input->IsEndOfStream(); | |
| 44 } | |
| 45 | |
| 28 | 46 |
| 29 FFmpegAudioDecoder::FFmpegAudioDecoder(MessageLoop* message_loop) | 47 FFmpegAudioDecoder::FFmpegAudioDecoder(MessageLoop* message_loop) |
| 30 : DecoderBase<AudioDecoder, Buffer>(message_loop), | 48 : message_loop_(message_loop), |
| 31 codec_context_(NULL), | 49 codec_context_(NULL), |
| 32 config_(0, CHANNEL_LAYOUT_NONE, 0), | 50 config_(0, CHANNEL_LAYOUT_NONE, 0), |
| 33 estimated_next_timestamp_(kNoTimestamp) { | 51 estimated_next_timestamp_(kNoTimestamp), |
| 52 decoded_audio_size_(AVCODEC_MAX_AUDIO_FRAME_SIZE), | |
| 53 decoded_audio_(new uint8[decoded_audio_size_]), | |
| 54 pending_reads_(0) { | |
| 34 } | 55 } |
| 35 | 56 |
| 36 FFmpegAudioDecoder::~FFmpegAudioDecoder() { | 57 FFmpegAudioDecoder::~FFmpegAudioDecoder() {} |
| 58 | |
| 59 void FFmpegAudioDecoder::Flush(FilterCallback* callback) { | |
| 60 message_loop_->PostTask( | |
| 61 FROM_HERE, | |
| 62 NewRunnableMethod(this, &FFmpegAudioDecoder::DoFlush, callback)); | |
| 37 } | 63 } |
| 38 | 64 |
| 39 void FFmpegAudioDecoder::DoInitialize(DemuxerStream* demuxer_stream, | 65 void FFmpegAudioDecoder::Initialize( |
| 40 bool* success, | 66 DemuxerStream* stream, |
| 41 Task* done_cb) { | 67 FilterCallback* callback, |
| 42 base::ScopedTaskRunner done_runner(done_cb); | 68 StatisticsCallback* stats_callback) { |
| 43 *success = false; | 69 // TODO(scherkus): change Initialize() signature to pass |stream| as a |
| 70 // scoped_refptr<>. | |
| 71 scoped_refptr<DemuxerStream> ref_stream(stream); | |
| 72 message_loop_->PostTask( | |
| 73 FROM_HERE, | |
| 74 NewRunnableMethod(this, &FFmpegAudioDecoder::DoInitialize, | |
| 75 ref_stream, callback, stats_callback)); | |
| 76 } | |
| 44 | 77 |
| 45 AVStream* av_stream = demuxer_stream->GetAVStream(); | 78 AudioDecoderConfig FFmpegAudioDecoder::config() { |
| 46 if (!av_stream) { | 79 return config_; |
| 47 return; | 80 } |
| 48 } | 81 |
| 82 void FFmpegAudioDecoder::ProduceAudioSamples(scoped_refptr<Buffer> buffer) { | |
|
acolwell GONE FROM CHROMIUM
2011/09/13 22:09:28
Remove buffer from method signature since it is ne
scherkus (not reviewing)
2011/09/16 18:10:13
Good suggestion! Deferring to later CL
| |
| 83 message_loop_->PostTask( | |
| 84 FROM_HERE, | |
| 85 NewRunnableMethod(this, &FFmpegAudioDecoder::DoQueueOutput, buffer)); | |
| 86 } | |
| 87 | |
| 88 void FFmpegAudioDecoder::DoInitialize( | |
| 89 DemuxerStream* stream, | |
| 90 FilterCallback* callback, | |
| 91 StatisticsCallback* stats_callback) { | |
| 92 scoped_ptr<FilterCallback> c(callback); | |
| 93 | |
| 94 demuxer_stream_ = stream; | |
| 95 AVStream* av_stream = demuxer_stream_->GetAVStream(); | |
| 96 CHECK(av_stream); | |
| 97 | |
| 98 stats_callback_.reset(stats_callback); | |
| 49 | 99 |
| 50 // Grab the AVStream's codec context and make sure we have sensible values. | 100 // Grab the AVStream's codec context and make sure we have sensible values. |
| 51 codec_context_ = av_stream->codec; | 101 codec_context_ = av_stream->codec; |
| 52 int bps = av_get_bits_per_sample_fmt(codec_context_->sample_fmt); | 102 int bps = av_get_bits_per_sample_fmt(codec_context_->sample_fmt); |
| 53 if (codec_context_->channels <= 0 || | 103 if (codec_context_->channels <= 0 || |
| 54 codec_context_->channels > Limits::kMaxChannels || | 104 codec_context_->channels > Limits::kMaxChannels || |
| 55 (codec_context_->channel_layout == 0 && codec_context_->channels > 2) || | 105 (codec_context_->channel_layout == 0 && codec_context_->channels > 2) || |
| 56 bps <= 0 || bps > Limits::kMaxBitsPerSample || | 106 bps <= 0 || bps > Limits::kMaxBitsPerSample || |
| 57 codec_context_->sample_rate <= 0 || | 107 codec_context_->sample_rate <= 0 || |
| 58 codec_context_->sample_rate > Limits::kMaxSampleRate) { | 108 codec_context_->sample_rate > Limits::kMaxSampleRate) { |
| 59 DLOG(WARNING) << "Invalid audio stream -" | 109 DLOG(ERROR) << "Invalid audio stream -" |
| 60 << " channels: " << codec_context_->channels | 110 << " channels: " << codec_context_->channels |
|
acolwell GONE FROM CHROMIUM
2011/09/13 22:09:28
Indent
scherkus (not reviewing)
2011/09/16 18:10:13
Done.
| |
| 61 << " channel layout:" << codec_context_->channel_layout | 111 << " channel layout:" << codec_context_->channel_layout |
| 62 << " bps: " << bps | 112 << " bps: " << bps |
| 63 << " sample rate: " << codec_context_->sample_rate; | 113 << " sample rate: " << codec_context_->sample_rate; |
| 114 | |
| 115 host()->SetError(PIPELINE_ERROR_DECODE); | |
| 116 callback->Run(); | |
| 64 return; | 117 return; |
| 65 } | 118 } |
| 66 | 119 |
| 67 // Serialize calls to avcodec_open(). | 120 // Serialize calls to avcodec_open(). |
| 68 AVCodec* codec = avcodec_find_decoder(codec_context_->codec_id); | 121 AVCodec* codec = avcodec_find_decoder(codec_context_->codec_id); |
| 69 if (!codec || avcodec_open(codec_context_, codec) < 0) { | 122 if (!codec || avcodec_open(codec_context_, codec) < 0) { |
| 123 DLOG(ERROR) << "Could not initialize audio decoder: " | |
| 124 << codec_context_->codec_id; | |
|
acolwell GONE FROM CHROMIUM
2011/09/13 22:09:28
Indent
scherkus (not reviewing)
2011/09/16 18:10:13
Done.
| |
| 125 | |
| 126 host()->SetError(PIPELINE_ERROR_DECODE); | |
| 127 callback->Run(); | |
| 70 return; | 128 return; |
| 71 } | 129 } |
| 72 | 130 |
| 131 // Success! | |
| 73 config_.bits_per_channel = | 132 config_.bits_per_channel = |
| 74 av_get_bits_per_sample_fmt(codec_context_->sample_fmt); | 133 av_get_bits_per_sample_fmt(codec_context_->sample_fmt); |
| 75 config_.channel_layout = | 134 config_.channel_layout = |
| 76 ChannelLayoutToChromeChannelLayout(codec_context_->channel_layout, | 135 ChannelLayoutToChromeChannelLayout(codec_context_->channel_layout, |
| 77 codec_context_->channels); | 136 codec_context_->channels); |
| 78 config_.sample_rate = codec_context_->sample_rate; | 137 config_.sample_rate = codec_context_->sample_rate; |
| 79 | 138 |
| 80 // Prepare the output buffer. | 139 callback->Run(); |
| 81 output_buffer_.reset(static_cast<uint8*>(av_malloc(kOutputBufferSize))); | |
| 82 if (!output_buffer_.get()) { | |
| 83 host()->SetError(PIPELINE_ERROR_OUT_OF_MEMORY); | |
| 84 return; | |
| 85 } | |
| 86 *success = true; | |
| 87 } | 140 } |
| 88 | 141 |
| 89 AudioDecoderConfig FFmpegAudioDecoder::config() { | 142 void FFmpegAudioDecoder::DoFlush(FilterCallback* callback) { |
| 90 return config_; | 143 avcodec_flush_buffers(codec_context_); |
| 144 estimated_next_timestamp_ = kNoTimestamp; | |
| 145 | |
| 146 callback->Run(); | |
| 147 delete callback; | |
| 91 } | 148 } |
| 92 | 149 |
| 93 void FFmpegAudioDecoder::ProduceAudioSamples(scoped_refptr<Buffer> output) { | 150 void FFmpegAudioDecoder::DoQueueOutput(scoped_refptr<Buffer> output) { |
|
acolwell GONE FROM CHROMIUM
2011/09/13 22:09:28
const&
Rename to DoProduceAudioSamples().
| |
| 94 DecoderBase<AudioDecoder, Buffer>::PostReadTaskHack(output); | 151 output_buffers_.push_back(output); |
|
acolwell GONE FROM CHROMIUM
2011/09/13 22:09:28
Change this to a simple counter member variable si
| |
| 152 ReadFromDemuxerStream(); | |
|
acolwell GONE FROM CHROMIUM
2011/09/13 22:09:28
Do we want to do this if we have reached EOS?
| |
| 95 } | 153 } |
| 96 | 154 |
| 97 void FFmpegAudioDecoder::DoSeek(base::TimeDelta time, Task* done_cb) { | 155 void FFmpegAudioDecoder::DoDecodeInput(scoped_refptr<Buffer> input) { |
|
acolwell GONE FROM CHROMIUM
2011/09/13 22:09:28
const&
Rename to DoOnReadComplete() to match Flus
scherkus (not reviewing)
2011/09/16 18:10:13
Done.
| |
| 98 avcodec_flush_buffers(codec_context_); | 156 DCHECK(!output_buffers_.empty()); |
| 99 estimated_next_timestamp_ = kNoTimestamp; | 157 DCHECK_GT(pending_reads_, 0); |
| 100 done_cb->Run(); | 158 pending_reads_--; |
| 101 delete done_cb; | |
| 102 } | |
| 103 | |
| 104 // ConvertAudioF32ToS32() converts float audio (F32) to int (S32) in place. | |
| 105 // This is a temporary solution. | |
| 106 // The purpose of this short term fix is to enable WMApro, which decodes to | |
| 107 // float. | |
| 108 // The audio driver has been tested by passing the float audio thru. | |
| 109 // FFmpeg for ChromeOS only exposes U8, S16 and F32. | |
| 110 // To properly expose new audio sample types at the audio driver layer, an enum | |
| 111 // should be created to represent all supported types, including types | |
| 112 // for Pepper. FFmpeg should be queried for type and passed along. | |
| 113 | |
| 114 // TODO(fbarchard): Remove this function. Expose all FFmpeg types to driver. | |
| 115 // TODO(fbarchard): If this function is kept, move it to audio_util.cc | |
| 116 | |
| 117 #if USE_SSE | |
| 118 const __m128 kFloatScaler = _mm_set1_ps( 2147483648.0f ); | |
| 119 static void FloatToIntSaturate(float* p) { | |
| 120 __m128 a = _mm_set1_ps(*p); | |
| 121 a = _mm_mul_ss(a, kFloatScaler); | |
| 122 *reinterpret_cast<int32*>(p) = _mm_cvtss_si32(a); | |
| 123 } | |
| 124 #else | |
| 125 const float kFloatScaler = 2147483648.0f; | |
| 126 const int kMinSample = std::numeric_limits<int32>::min(); | |
| 127 const int kMaxSample = std::numeric_limits<int32>::max(); | |
| 128 const float kMinSampleFloat = | |
| 129 static_cast<float>(std::numeric_limits<int32>::min()); | |
| 130 const float kMaxSampleFloat = | |
| 131 static_cast<float>(std::numeric_limits<int32>::max()); | |
| 132 static void FloatToIntSaturate(float* p) { | |
| 133 float f = *p * kFloatScaler + 0.5f; | |
| 134 int sample; | |
| 135 if (f <= kMinSampleFloat) { | |
| 136 sample = kMinSample; | |
| 137 } else if (f >= kMaxSampleFloat) { | |
| 138 sample = kMaxSample; | |
| 139 } else { | |
| 140 sample = static_cast<int32>(f); | |
| 141 } | |
| 142 *reinterpret_cast<int32*>(p) = sample; | |
| 143 } | |
| 144 #endif | |
| 145 static void ConvertAudioF32ToS32(void* buffer, int buffer_size) { | |
| 146 for (int i = 0; i < buffer_size / 4; ++i) { | |
| 147 FloatToIntSaturate(reinterpret_cast<float*>(buffer) + i); | |
| 148 } | |
| 149 } | |
| 150 | |
| 151 void FFmpegAudioDecoder::DoDecode(Buffer* input) { | |
| 152 PipelineStatistics statistics; | |
| 153 | 159 |
| 154 // FFmpeg tends to seek Ogg audio streams in the middle of nowhere, giving us | 160 // FFmpeg tends to seek Ogg audio streams in the middle of nowhere, giving us |
| 155 // a whole bunch of AV_NOPTS_VALUE packets. Discard them until we find | 161 // a whole bunch of AV_NOPTS_VALUE packets. Discard them until we find |
| 156 // something valid. Refer to http://crbug.com/49709 | 162 // something valid. Refer to http://crbug.com/49709 |
| 157 // TODO(hclam): remove this once fixing the issue in FFmpeg. | |
| 158 if (input->GetTimestamp() == kNoTimestamp && | 163 if (input->GetTimestamp() == kNoTimestamp && |
| 159 estimated_next_timestamp_ == kNoTimestamp && | 164 estimated_next_timestamp_ == kNoTimestamp && |
| 160 !input->IsEndOfStream()) { | 165 !input->IsEndOfStream()) { |
| 161 DecoderBase<AudioDecoder, Buffer>::OnDecodeComplete(statistics); | 166 ReadFromDemuxerStream(); |
| 162 return; | 167 return; |
| 163 } | 168 } |
| 164 | 169 |
| 165 // Due to FFmpeg API changes we no longer have const read-only pointers. | |
| 166 AVPacket packet; | 170 AVPacket packet; |
| 167 av_init_packet(&packet); | 171 av_init_packet(&packet); |
| 168 packet.data = const_cast<uint8*>(input->GetData()); | 172 packet.data = const_cast<uint8*>(input->GetData()); |
|
acolwell GONE FROM CHROMIUM
2011/09/13 22:09:28
Mind if we make this a little more explicit for th
scherkus (not reviewing)
2011/09/16 18:10:13
Done.
| |
| 169 packet.size = input->GetDataSize(); | 173 packet.size = input->GetDataSize(); |
| 170 | 174 |
| 175 PipelineStatistics statistics; | |
| 171 statistics.audio_bytes_decoded = input->GetDataSize(); | 176 statistics.audio_bytes_decoded = input->GetDataSize(); |
| 172 | 177 |
| 173 int16_t* output_buffer = reinterpret_cast<int16_t*>(output_buffer_.get()); | 178 int decoded_audio_size = decoded_audio_size_; |
| 174 int output_buffer_size = kOutputBufferSize; | 179 int result = avcodec_decode_audio3( |
| 175 int result = avcodec_decode_audio3(codec_context_, | 180 codec_context_, reinterpret_cast<int16_t*>(decoded_audio_.get()), |
| 176 output_buffer, | 181 &decoded_audio_size, &packet); |
| 177 &output_buffer_size, | |
| 178 &packet); | |
| 179 | 182 |
| 180 if (codec_context_->sample_fmt == SAMPLE_FMT_FLT) { | 183 if (IsErrorResult(result, decoded_audio_size)) { |
| 181 ConvertAudioF32ToS32(output_buffer, output_buffer_size); | 184 DLOG(ERROR) << "Error decoding an audio frame with timestamp: " |
| 182 } | 185 << input->GetTimestamp().InMicroseconds() << " us, duration: " |
| 183 | 186 << input->GetDuration().InMicroseconds() << " us, packet size: " |
| 184 // TODO(ajwong): Consider if kOutputBufferSize should just be an int instead | 187 << input->GetDataSize() << " bytes"; |
| 185 // of a size_t. | 188 ReadFromDemuxerStream(); |
|
acolwell GONE FROM CHROMIUM
2011/09/13 22:09:28
Do we want to issue another read if this is IsEndO
scherkus (not reviewing)
2011/09/16 18:10:13
Would that imply that the audio decoder in questio
| |
| 186 if (result < 0 || | |
| 187 output_buffer_size < 0 || | |
| 188 static_cast<size_t>(output_buffer_size) > kOutputBufferSize) { | |
| 189 VLOG(1) << "Error decoding an audio frame with timestamp: " | |
| 190 << input->GetTimestamp().InMicroseconds() << " us, duration: " | |
| 191 << input->GetDuration().InMicroseconds() << " us, packet size: " | |
| 192 << input->GetDataSize() << " bytes"; | |
| 193 DecoderBase<AudioDecoder, Buffer>::OnDecodeComplete(statistics); | |
| 194 return; | 189 return; |
| 195 } | 190 } |
| 196 | 191 |
| 197 // If we have decoded something, enqueue the result. | 192 scoped_refptr<DataBuffer> output; |
| 198 if (output_buffer_size) { | |
| 199 DataBuffer* result_buffer = new DataBuffer(output_buffer_size); | |
| 200 result_buffer->SetDataSize(output_buffer_size); | |
| 201 uint8* data = result_buffer->GetWritableData(); | |
| 202 memcpy(data, output_buffer, output_buffer_size); | |
| 203 | 193 |
| 204 // We don't trust the demuxer, so always calculate the duration based on | 194 if (ProducedAudioSamples(decoded_audio_size)) { |
| 205 // the actual number of samples decoded. | 195 // Copy the audio samples into an output buffer. |
| 206 base::TimeDelta duration = CalculateDuration(output_buffer_size); | 196 output = new DataBuffer(decoded_audio_size); |
| 207 result_buffer->SetDuration(duration); | 197 output->SetDataSize(decoded_audio_size); |
| 198 uint8* data = output->GetWritableData(); | |
| 199 memcpy(data, decoded_audio_.get(), decoded_audio_size); | |
| 200 } else if (IsTimestampMarkerPacket(result, input)) { | |
| 201 // Nothing else to do here but update our estimation. | |
| 202 estimated_next_timestamp_ = input->GetTimestamp() + input->GetDuration(); | |
| 203 } else if (IsEndOfStream(result, decoded_audio_size, input)) { | |
| 204 // Create an end of stream output buffer. | |
| 205 output = new DataBuffer(0); | |
| 206 output->SetTimestamp(input->GetTimestamp()); | |
| 207 output->SetDuration(input->GetDuration()); | |
| 208 } | |
| 208 | 209 |
| 209 // Use an estimated timestamp unless the incoming buffer has a valid one. | 210 // Decoding finished successfully, update stats and execute callback. |
| 210 if (input->GetTimestamp() == kNoTimestamp) { | 211 stats_callback_->Run(statistics); |
| 211 result_buffer->SetTimestamp(estimated_next_timestamp_); | 212 if (output) { |
| 213 DCHECK_GT(output_buffers_.size(), 0u); | |
|
acolwell GONE FROM CHROMIUM
2011/09/13 22:09:28
Replace w/ counter check and decrement.
scherkus (not reviewing)
2011/09/16 18:10:13
Future CL
| |
| 214 output_buffers_.pop_front(); | |
| 212 | 215 |
| 213 // Keep the estimated timestamp invalid until we get an incoming buffer | 216 ConsumeAudioSamples(output); |
| 214 // with a valid timestamp. This can happen during seeks, etc... | 217 } |
| 215 if (estimated_next_timestamp_ != kNoTimestamp) { | 218 } |
| 216 estimated_next_timestamp_ += duration; | |
| 217 } | |
| 218 } else { | |
| 219 result_buffer->SetTimestamp(input->GetTimestamp()); | |
| 220 estimated_next_timestamp_ = input->GetTimestamp() + duration; | |
| 221 } | |
| 222 | 219 |
| 223 EnqueueResult(result_buffer); | 220 void FFmpegAudioDecoder::ReadFromDemuxerStream() { |
| 224 DecoderBase<AudioDecoder, Buffer>::OnDecodeComplete(statistics); | 221 DCHECK(!output_buffers_.empty()) |
| 222 << "Reads should only occur if there are output buffers."; | |
| 223 | |
| 224 pending_reads_++; | |
| 225 demuxer_stream_->Read(base::Bind(&FFmpegAudioDecoder::OnReadComplete, | |
| 226 this)); | |
| 227 } | |
| 228 | |
| 229 void FFmpegAudioDecoder::OnReadComplete(Buffer* buffer) { | |
| 230 // TODO(scherkus): change DemuxerStream::Read() to use scoped_refptr<> for | |
| 231 // callback. | |
| 232 scoped_refptr<Buffer> ref_buffer(buffer); | |
| 233 message_loop_->PostTask( | |
| 234 FROM_HERE, | |
| 235 NewRunnableMethod(this, &FFmpegAudioDecoder::DoDecodeInput, ref_buffer)); | |
| 236 } | |
| 237 | |
| 238 void FFmpegAudioDecoder::UpdateDurationAndTimestamp( | |
| 239 const Buffer* input, | |
| 240 DataBuffer* output) { | |
| 241 // Always calculate duration based on the actual number of samples decoded. | |
| 242 base::TimeDelta duration = CalculateDuration(output->GetDataSize()); | |
| 243 output->SetDuration(duration); | |
| 244 | |
| 245 // Use the incoming timestamp if it's valid. | |
| 246 if (input->GetTimestamp() != kNoTimestamp) { | |
| 247 output->SetTimestamp(input->GetTimestamp()); | |
|
acolwell GONE FROM CHROMIUM
2011/09/13 22:09:28
Is this right given that there can be a codec dela
scherkus (not reviewing)
2011/09/16 18:10:13
Good catch! This behaviour is now codified in a un
| |
| 248 estimated_next_timestamp_ = input->GetTimestamp() + duration; | |
| 225 return; | 249 return; |
|
acolwell GONE FROM CHROMIUM
2011/09/13 22:09:28
remove return.
Something still doesn't smell righ
| |
| 226 } | 250 } |
| 227 | 251 |
| 228 // We can get a positive result but no decoded data. This is ok because | 252 // Otherwise use an estimated timestamp and attempt to update the estimation |
| 229 // this can be a marker packet that only contains a timestamp. In this case we | 253 // as long as it's valid. |
| 230 // save the timestamp for later use. | 254 output->SetTimestamp(estimated_next_timestamp_); |
| 231 if (result && !input->IsEndOfStream() && | 255 if (estimated_next_timestamp_ != kNoTimestamp) { |
| 232 input->GetTimestamp() != kNoTimestamp && | 256 estimated_next_timestamp_ += duration; |
| 233 input->GetDuration() != kNoTimestamp) { | |
| 234 estimated_next_timestamp_ = input->GetTimestamp() + input->GetDuration(); | |
| 235 DecoderBase<AudioDecoder, Buffer>::OnDecodeComplete(statistics); | |
| 236 return; | |
| 237 } | 257 } |
| 238 | |
| 239 // Three conditions to meet to declare end of stream for this decoder: | |
| 240 // 1. FFmpeg didn't read anything. | |
| 241 // 2. FFmpeg didn't output anything. | |
| 242 // 3. An end of stream buffer is received. | |
| 243 if (result == 0 && output_buffer_size == 0 && input->IsEndOfStream()) { | |
| 244 DataBuffer* result_buffer = new DataBuffer(0); | |
| 245 result_buffer->SetTimestamp(input->GetTimestamp()); | |
| 246 result_buffer->SetDuration(input->GetDuration()); | |
| 247 EnqueueResult(result_buffer); | |
| 248 } | |
| 249 DecoderBase<AudioDecoder, Buffer>::OnDecodeComplete(statistics); | |
| 250 } | 258 } |
| 251 | 259 |
| 252 base::TimeDelta FFmpegAudioDecoder::CalculateDuration(size_t size) { | 260 base::TimeDelta FFmpegAudioDecoder::CalculateDuration(int size) { |
| 253 int64 denominator = codec_context_->channels * | 261 int64 denominator = ChannelLayoutToChannelCount(config_.channel_layout) * |
| 254 av_get_bits_per_sample_fmt(codec_context_->sample_fmt) / 8 * | 262 config_.bits_per_channel / 8 * config_.sample_rate; |
| 255 codec_context_->sample_rate; | |
| 256 double microseconds = size / | 263 double microseconds = size / |
|
acolwell GONE FROM CHROMIUM
2011/09/13 22:09:28
why do we convert to a double and then back to int
scherkus (not reviewing)
2011/09/16 18:10:13
I looked at the original code review but I can't r
| |
| 257 (denominator / static_cast<double>(base::Time::kMicrosecondsPerSecond)); | 264 (denominator / static_cast<double>(base::Time::kMicrosecondsPerSecond)); |
| 258 return base::TimeDelta::FromMicroseconds(static_cast<int64>(microseconds)); | 265 return base::TimeDelta::FromMicroseconds(static_cast<int64>(microseconds)); |
| 259 } | 266 } |
| 260 | 267 |
| 261 } // namespace | 268 } // namespace media |
| OLD | NEW |