OLD | NEW |
---|---|
1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "media/filters/ffmpeg_audio_decoder.h" | 5 #include "media/filters/ffmpeg_audio_decoder.h" |
6 | 6 |
7 #include "media/base/callback.h" | 7 #include "media/base/filter_host.h" |
8 #include "media/base/data_buffer.h" | |
9 #include "media/base/limits.h" | 8 #include "media/base/limits.h" |
10 #include "media/ffmpeg/ffmpeg_common.h" | 9 #include "media/ffmpeg/ffmpeg_common.h" |
11 #include "media/filters/ffmpeg_demuxer.h" | |
12 | |
13 #if !defined(USE_SSE) | |
14 #if defined(__SSE__) || defined(ARCH_CPU_X86_64) || _M_IX86_FP==1 | |
15 #define USE_SSE 1 | |
16 #else | |
17 #define USE_SSE 0 | |
18 #endif | |
19 #endif | |
20 #if USE_SSE | |
21 #include <xmmintrin.h> | |
22 #endif | |
23 | 10 |
24 namespace media { | 11 namespace media { |
25 | 12 |
26 // Size of the decoded audio buffer. | 13 static bool IsErrorResult(int result, int decoded_size) { |
27 const size_t FFmpegAudioDecoder::kOutputBufferSize = | 14 return result < 0 || |
28 AVCODEC_MAX_AUDIO_FRAME_SIZE; | 15 decoded_size < 0 || |
16 decoded_size > AVCODEC_MAX_AUDIO_FRAME_SIZE; | |
17 } | |
18 | |
19 static bool ProducedAudioSamples(int decoded_size) { | |
20 return decoded_size != 0; | |
acolwell GONE FROM CHROMIUM
2011/05/13 19:35:37
do we want this to return true if decoded_size < 0
scherkus (not reviewing)
2011/09/11 14:49:22
Hmm... yeah these helper methods are a bit weird s
| |
21 } | |
22 | |
23 static bool IsTimestampMarkerPacket(int result, Buffer* input) { | |
24 // We can get a positive result but no decoded data. This is ok because | |
25 // this can be a marker packet that only contains a timestamp. | |
26 return result && !input->IsEndOfStream() && | |
acolwell GONE FROM CHROMIUM
2011/05/13 19:35:37
It's ok if result < 0? Perhaps a comment indicatin
scherkus (not reviewing)
2011/09/11 14:49:22
Done.
| |
27 input->GetTimestamp() != kNoTimestamp && | |
28 input->GetDuration() != kNoTimestamp; | |
29 } | |
30 | |
31 static bool IsEndOfStream(int result, int decoded_size, Buffer* input) { | |
32 // Three conditions to meet to declare end of stream for this decoder: | |
33 // 1. FFmpeg didn't read anything. | |
34 // 2. FFmpeg didn't output anything. | |
35 // 3. An end of stream buffer is received. | |
36 return result == 0 && decoded_size == 0 && input->IsEndOfStream(); | |
acolwell GONE FROM CHROMIUM
2011/05/13 19:35:37
result -> read_size?
| |
37 } | |
29 | 38 |
30 FFmpegAudioDecoder::FFmpegAudioDecoder(MessageLoop* message_loop) | 39 FFmpegAudioDecoder::FFmpegAudioDecoder(MessageLoop* message_loop) |
31 : DecoderBase<AudioDecoder, Buffer>(message_loop), | 40 : message_loop_(message_loop), |
41 demuxer_stream_(NULL), | |
32 codec_context_(NULL), | 42 codec_context_(NULL), |
33 config_(0, 0, 0), | 43 config_(0, 0, 0), |
34 estimated_next_timestamp_(kNoTimestamp) { | 44 estimated_next_timestamp_(kNoTimestamp), |
45 pending_reads_(0) { | |
35 } | 46 } |
36 | 47 |
37 FFmpegAudioDecoder::~FFmpegAudioDecoder() { | 48 FFmpegAudioDecoder::~FFmpegAudioDecoder() { |
49 if (codec_context_) { | |
acolwell GONE FROM CHROMIUM
2011/05/13 19:35:37
nit: braces not necessary
scherkus (not reviewing)
2011/09/11 14:49:22
Done.
| |
50 avcodec_close(codec_context_); | |
51 } | |
38 } | 52 } |
39 | 53 |
40 void FFmpegAudioDecoder::DoInitialize(DemuxerStream* demuxer_stream, | 54 void FFmpegAudioDecoder::Play(FilterCallback* callback) { |
acolwell GONE FROM CHROMIUM
2011/05/13 19:35:37
remove
scherkus (not reviewing)
2011/09/11 14:49:22
Done.
| |
41 bool* success, | 55 // XXX: anything else? |
42 Task* done_cb) { | 56 callback->Run(); |
43 AutoTaskRunner done_runner(done_cb); | 57 delete callback; |
44 *success = false; | 58 } |
45 | 59 |
46 AVStream* av_stream = demuxer_stream->GetAVStream(); | 60 void FFmpegAudioDecoder::Pause(FilterCallback* callback) { |
acolwell GONE FROM CHROMIUM
2011/05/13 19:35:37
remove
scherkus (not reviewing)
2011/09/11 14:49:22
Done.
| |
47 if (!av_stream) { | 61 // XXX: anything else? |
62 callback->Run(); | |
63 delete callback; | |
64 } | |
65 | |
66 void FFmpegAudioDecoder::Flush(FilterCallback* callback) { | |
67 // XXX: move to proper thread. | |
acolwell GONE FROM CHROMIUM
2011/05/13 19:35:37
Or you could just protect this with a lock?
scherkus (not reviewing)
2011/09/11 14:49:22
Done.
| |
68 avcodec_flush_buffers(codec_context_); | |
69 estimated_next_timestamp_ = kNoTimestamp; | |
70 | |
71 callback->Run(); | |
72 delete callback; | |
73 } | |
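
For reference, the locking alternative acolwell floats in the thread above might look roughly like the sketch below. It assumes a hypothetical base::Lock member (|lock_|) guarding the codec state; this is not code from the patch, and how the "move to proper thread" XXX was ultimately resolved is not shown in this diff.

    // Sketch only: |lock_| is an assumed base::Lock member, not part of this CL.
    void FFmpegAudioDecoder::Flush(FilterCallback* callback) {
      {
        base::AutoLock auto_lock(lock_);  // Serialize access to codec state.
        avcodec_flush_buffers(codec_context_);
        estimated_next_timestamp_ = kNoTimestamp;
      }
      callback->Run();
      delete callback;
    }
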
74 | |
75 void FFmpegAudioDecoder::Stop(FilterCallback* callback) { | |
76 // XXX: anything else? | |
acolwell GONE FROM CHROMIUM
2011/05/13 19:35:37
Clear consume callback so we won't send any data d
| |
77 callback->Run(); | |
78 delete callback; | |
79 } | |
80 | |
81 void FFmpegAudioDecoder::Seek(base::TimeDelta time, FilterCallback* callback) { | |
acolwell GONE FROM CHROMIUM
2011/05/13 19:35:37
remove
scherkus (not reviewing)
2011/09/11 14:49:22
Done.
| |
82 // XXX: anything else? | |
83 callback->Run(); | |
84 delete callback; | |
85 } | |
86 | |
87 void FFmpegAudioDecoder::Initialize( | |
88 DemuxerStream* stream, | |
89 FilterCallback* callback, | |
90 StatisticsCallback* stats_callback) { | |
91 // XXX: do we still care about initializing on our own message_loop_? | |
acolwell GONE FROM CHROMIUM
2011/05/13 19:35:37
I'm fine with keeping a message_loop for audio dec
| |
92 // XXX: do we want to try and use MessageLoopProxy at this time? | |
acolwell GONE FROM CHROMIUM
2011/05/13 19:35:37
separate CL IMO
| |
93 // TODO(scherkus): change Initialize() signature to pass |stream| as a | |
94 // scoped_refptr<>. | |
95 scoped_refptr<DemuxerStream> ref_stream(stream); | |
96 message_loop_->PostTask( | |
97 FROM_HERE, | |
98 NewRunnableMethod(this, &FFmpegAudioDecoder::DoInitialize, | |
99 ref_stream, callback, stats_callback)); | |
100 } | |
101 | |
102 AudioDecoderConfig FFmpegAudioDecoder::config() { | |
103 return config_; | |
104 } | |
105 | |
106 void FFmpegAudioDecoder::ProduceAudioSamples(scoped_refptr<Buffer> buffer) { | |
107 message_loop_->PostTask( | |
acolwell GONE FROM CHROMIUM
2011/05/13 19:35:37
What about just putting this in an "if(MessageLoo
scherkus (not reviewing)
2011/09/11 14:49:22
I prefer the extra methods for clarity and saner d
| |
108 FROM_HERE, | |
109 NewRunnableMethod(this, &FFmpegAudioDecoder::DoQueueOutputBuffer, | |
110 buffer)); | |
111 } | |
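
The "if (MessageLoop...)" guard acolwell mentions in the thread above is the usual single-method alternative to the ProduceAudioSamples()/DoQueueOutputBuffer() split (and to the similar suggestion for OnReadComplete() further down). A rough sketch of that pattern, not what this CL does:

    // Sketch of the guard pattern (assumption, not this CL's approach): re-post
    // to |message_loop_| when called on the wrong thread, then fall through.
    void FFmpegAudioDecoder::ProduceAudioSamples(scoped_refptr<Buffer> buffer) {
      if (MessageLoop::current() != message_loop_) {
        message_loop_->PostTask(
            FROM_HERE,
            NewRunnableMethod(this, &FFmpegAudioDecoder::ProduceAudioSamples,
                              buffer));
        return;
      }
      output_buffers_.push_back(buffer);
      ReadFromDemuxerStream();
    }
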
112 | |
113 void FFmpegAudioDecoder::DoInitialize( | |
114 DemuxerStream* stream, | |
acolwell GONE FROM CHROMIUM
2011/05/13 19:35:37
consider moving this to Initialize() and using an
| |
115 FilterCallback* callback, | |
116 StatisticsCallback* stats_callback) { | |
117 scoped_ptr<FilterCallback> c(callback); | |
118 | |
119 demuxer_stream_ = stream; | |
120 AVStream* av_stream = demuxer_stream_->GetAVStream(); | |
121 CHECK(av_stream); | |
122 | |
123 stats_callback_.reset(stats_callback); | |
124 | |
125 // Sanity checks. | |
126 AVCodecContext* codec_context = av_stream->codec; | |
127 if (!codec_context || | |
128 codec_context->channels <= 0 || | |
129 codec_context->channels > Limits::kMaxChannels || | |
130 codec_context->sample_rate <= 0 || | |
131 codec_context->sample_rate > Limits::kMaxSampleRate) { | |
132 DLOG(ERROR) << "Invalid audio format (" | |
133 << " channels: " << codec_context->channels | |
134 << " sample rate: " << codec_context->sample_rate << ")"; | |
135 | |
136 // XXX: should this be a "format" error? | |
137 host()->SetError(PIPELINE_ERROR_DECODE); | |
138 callback->Run(); | |
48 return; | 139 return; |
49 } | 140 } |
50 | 141 |
51 // Grab the AVStream's codec context and make sure we have sensible values. | 142 AVCodec* codec = avcodec_find_decoder(codec_context->codec_id); |
52 codec_context_ = av_stream->codec; | 143 if (!codec) { |
53 int bps = av_get_bits_per_sample_fmt(codec_context_->sample_fmt); | 144 DLOG(ERROR) << "Audio codec not found (" |
54 if (codec_context_->channels <= 0 || | 145 << " codec_id: " << codec_context->codec_id << ")"; |
55 codec_context_->channels > Limits::kMaxChannels || | 146 |
56 bps <= 0 || bps > Limits::kMaxBitsPerSample || | 147 // XXX: should this be a "format" error? |
57 codec_context_->sample_rate <= 0 || | 148 host()->SetError(PIPELINE_ERROR_DECODE); |
58 codec_context_->sample_rate > Limits::kMaxSampleRate) { | 149 callback->Run(); |
acolwell GONE FROM CHROMIUM
2011/05/13 19:35:37
Would this be a good time to convert Initialize()
scherkus (not reviewing)
2011/09/11 14:49:22
Follow up CL
| |
59 DLOG(WARNING) << "Invalid audio stream -" | |
60 << " channels: " << codec_context_->channels | |
61 << " bps: " << bps | |
62 << " sample rate: " << codec_context_->sample_rate; | |
63 return; | 150 return; |
64 } | 151 } |
65 | 152 |
66 // Serialize calls to avcodec_open(). | |
67 AVCodec* codec = avcodec_find_decoder(codec_context_->codec_id); | |
68 if (!codec || avcodec_open(codec_context_, codec) < 0) { | |
69 return; | |
70 } | |
71 | |
72 config_.bits_per_channel = | 153 config_.bits_per_channel = |
73 av_get_bits_per_sample_fmt(codec_context_->sample_fmt); | 154 av_get_bits_per_sample_fmt(codec_context_->sample_fmt); |
74 config_.channels_per_sample = codec_context_->channels; | 155 config_.channels_per_sample = codec_context_->channels; |
75 config_.sample_rate = codec_context_->sample_rate; | 156 config_.sample_rate = codec_context_->sample_rate; |
76 | 157 |
77 // Prepare the output buffer. | 158 // Prep our own AVCodecContext struct. |
78 output_buffer_.reset(static_cast<uint8*>(av_malloc(kOutputBufferSize))); | 159 codec_context_ = avcodec_alloc_context(); |
79 if (!output_buffer_.get()) { | 160 codec_context_->channels = codec_context->channels; |
80 host()->SetError(PIPELINE_ERROR_OUT_OF_MEMORY); | 161 codec_context_->sample_rate = codec_context->sample_rate; |
162 codec_context_->sample_fmt = codec_context->sample_fmt; | |
163 | |
164 if (avcodec_open(codec_context_, codec) < 0) { | |
165 DLOG(ERROR) << "Could not initialize audio decoder (" | |
166 << " codec_id: " << codec_context->codec_id | |
167 << " channels: " << codec_context->channels | |
168 << " sample rate: " << codec_context->sample_rate << ")"; | |
169 | |
170 // XXX: should this be a "format" error? | |
171 host()->SetError(PIPELINE_ERROR_DECODE); | |
172 callback->Run(); | |
81 return; | 173 return; |
82 } | 174 } |
83 *success = true; | 175 |
176 // XXX: the div-by-2 scares me a tiny bit for some reason. | |
177 decoded_audio_.reset(new int16_t[AVCODEC_MAX_AUDIO_FRAME_SIZE / 2]); | |
acolwell GONE FROM CHROMIUM
2011/05/13 19:35:37
AVCODEC_MAX_AUDIO_FRAME_SIZE in bytes. We're alloc
scherkus (not reviewing)
2011/09/11 14:49:22
Done and we'll do a reinterpret_cast<> in decode c
| |
178 | |
179 callback->Run(); | |
84 } | 180 } |
85 | 181 |
86 AudioDecoderConfig FFmpegAudioDecoder::config() { | 182 void FFmpegAudioDecoder::DoQueueOutputBuffer(scoped_refptr<Buffer> output) { |
87 return config_; | 183 output_buffers_.push_back(output); |
184 ReadFromDemuxerStream(); | |
88 } | 185 } |
89 | 186 |
90 void FFmpegAudioDecoder::ProduceAudioSamples(scoped_refptr<Buffer> output) { | 187 void FFmpegAudioDecoder::DoDecodeInputBuffer(scoped_refptr<Buffer> input) { |
91 DecoderBase<AudioDecoder, Buffer>::PostReadTaskHack(output); | 188 DCHECK(!output_buffers_.empty()); |
92 } | 189 DCHECK_GT(pending_reads_, 0); |
93 | 190 pending_reads_--; |
94 void FFmpegAudioDecoder::DoSeek(base::TimeDelta time, Task* done_cb) { | |
95 avcodec_flush_buffers(codec_context_); | |
96 estimated_next_timestamp_ = kNoTimestamp; | |
97 done_cb->Run(); | |
98 delete done_cb; | |
99 } | |
100 | |
101 // ConvertAudioF32ToS32() converts float audio (F32) to int (S32) in place. | |
102 // This is a temporary solution. | |
103 // The purpose of this short term fix is to enable WMApro, which decodes to | |
104 // float. | |
105 // The audio driver has been tested by passing the float audio thru. | |
106 // FFmpeg for ChromeOS only exposes U8, S16 and F32. | |
107 // To properly expose new audio sample types at the audio driver layer, an enum | |
108 // should be created to represent all supported types, including types | |
109 // for Pepper. FFmpeg should be queried for type and passed along. | |
110 | |
111 // TODO(fbarchard): Remove this function. Expose all FFmpeg types to driver. | |
112 // TODO(fbarchard): If this function is kept, move it to audio_util.cc | |
113 | |
114 #if USE_SSE | |
115 const __m128 kFloatScaler = _mm_set1_ps( 2147483648.0f ); | |
116 static void FloatToIntSaturate(float* p) { | |
117 __m128 a = _mm_set1_ps(*p); | |
118 a = _mm_mul_ss(a, kFloatScaler); | |
119 *reinterpret_cast<int32*>(p) = _mm_cvtss_si32(a); | |
120 } | |
121 #else | |
122 const float kFloatScaler = 2147483648.0f; | |
123 const int kMinSample = std::numeric_limits<int32>::min(); | |
124 const int kMaxSample = std::numeric_limits<int32>::max(); | |
125 const float kMinSampleFloat = | |
126 static_cast<float>(std::numeric_limits<int32>::min()); | |
127 const float kMaxSampleFloat = | |
128 static_cast<float>(std::numeric_limits<int32>::max()); | |
129 static void FloatToIntSaturate(float* p) { | |
130 float f = *p * kFloatScaler + 0.5f; | |
131 int sample; | |
132 if (f <= kMinSampleFloat) { | |
133 sample = kMinSample; | |
134 } else if (f >= kMaxSampleFloat) { | |
135 sample = kMaxSample; | |
136 } else { | |
137 sample = static_cast<int32>(f); | |
138 } | |
139 *reinterpret_cast<int32*>(p) = sample; | |
140 } | |
141 #endif | |
142 static void ConvertAudioF32ToS32(void* buffer, int buffer_size) { | |
143 for (int i = 0; i < buffer_size / 4; ++i) { | |
144 FloatToIntSaturate(reinterpret_cast<float*>(buffer) + i); | |
145 } | |
146 } | |
147 | |
148 void FFmpegAudioDecoder::DoDecode(Buffer* input) { | |
149 PipelineStatistics statistics; | |
150 | 191 |
151 // FFmpeg tends to seek Ogg audio streams in the middle of nowhere, giving us | 192 // FFmpeg tends to seek Ogg audio streams in the middle of nowhere, giving us |
152 // a whole bunch of AV_NOPTS_VALUE packets. Discard them until we find | 193 // a whole bunch of AV_NOPTS_VALUE packets. Discard them until we find |
153 // something valid. Refer to http://crbug.com/49709 | 194 // something valid. Refer to http://crbug.com/49709 |
154 // TODO(hclam): remove this once fixing the issue in FFmpeg. | |
155 if (input->GetTimestamp() == kNoTimestamp && | 195 if (input->GetTimestamp() == kNoTimestamp && |
156 estimated_next_timestamp_ == kNoTimestamp && | 196 estimated_next_timestamp_ == kNoTimestamp && |
157 !input->IsEndOfStream()) { | 197 !input->IsEndOfStream()) { |
158 DecoderBase<AudioDecoder, Buffer>::OnDecodeComplete(statistics); | 198 ReadFromDemuxerStream(); |
159 return; | 199 return; |
160 } | 200 } |
161 | 201 |
162 // Due to FFmpeg API changes we no longer have const read-only pointers. | |
163 AVPacket packet; | 202 AVPacket packet; |
164 av_init_packet(&packet); | 203 av_init_packet(&packet); |
165 packet.data = const_cast<uint8*>(input->GetData()); | 204 packet.data = const_cast<uint8*>(input->GetData()); |
166 packet.size = input->GetDataSize(); | 205 packet.size = input->GetDataSize(); |
167 | 206 |
207 PipelineStatistics statistics; | |
168 statistics.audio_bytes_decoded = input->GetDataSize(); | 208 statistics.audio_bytes_decoded = input->GetDataSize(); |
169 | 209 |
170 int16_t* output_buffer = reinterpret_cast<int16_t*>(output_buffer_.get()); | 210 int decoded_audio_size = AVCODEC_MAX_AUDIO_FRAME_SIZE; |
acolwell GONE FROM CHROMIUM
2011/05/13 19:35:37
This should be based on the size allocated. Perhap
scherkus (not reviewing)
2011/09/11 14:49:22
Done.
| |
171 int output_buffer_size = kOutputBufferSize; | |
172 int result = avcodec_decode_audio3(codec_context_, | 211 int result = avcodec_decode_audio3(codec_context_, |
173 output_buffer, | 212 decoded_audio_.get(), |
174 &output_buffer_size, | 213 &decoded_audio_size, |
175 &packet); | 214 &packet); |
176 | 215 |
216 // XXX: this is only for WMAPro audio codec?! remove?! | |
acolwell GONE FROM CHROMIUM
2011/05/13 19:35:37
I'm fine with removing this since I don't think we
scherkus (not reviewing)
2011/09/11 14:49:22
Done.
| |
217 #if 0 | |
177 if (codec_context_->sample_fmt == SAMPLE_FMT_FLT) { | 218 if (codec_context_->sample_fmt == SAMPLE_FMT_FLT) { |
178 ConvertAudioF32ToS32(output_buffer, output_buffer_size); | 219 ConvertAudioF32ToS32(decoded_audio_.get(), decoded_audio_size); |
179 } | 220 } |
221 #endif | |
180 | 222 |
181 // TODO(ajwong): Consider if kOutputBufferSize should just be an int instead | 223 if (IsErrorResult(result, decoded_audio_size)) { |
182 // of a size_t. | 224 DLOG(ERROR) << "Error decoding an audio frame with timestamp: " |
183 if (result < 0 || | 225 << input->GetTimestamp().InMicroseconds() << " us, duration: " |
184 output_buffer_size < 0 || | 226 << input->GetDuration().InMicroseconds() << " us, packet size: " |
185 static_cast<size_t>(output_buffer_size) > kOutputBufferSize) { | 227 << input->GetDataSize() << " bytes"; |
186 VLOG(1) << "Error decoding an audio frame with timestamp: " | 228 ReadFromDemuxerStream(); |
187 << input->GetTimestamp().InMicroseconds() << " us, duration: " | |
188 << input->GetDuration().InMicroseconds() << " us, packet size: " | |
189 << input->GetDataSize() << " bytes"; | |
190 DecoderBase<AudioDecoder, Buffer>::OnDecodeComplete(statistics); | |
191 return; | 229 return; |
192 } | 230 } |
193 | 231 |
194 // If we have decoded something, enqueue the result. | 232 scoped_refptr<DataBuffer> output; |
195 if (output_buffer_size) { | |
196 DataBuffer* result_buffer = new DataBuffer(output_buffer_size); | |
197 result_buffer->SetDataSize(output_buffer_size); | |
198 uint8* data = result_buffer->GetWritableData(); | |
199 memcpy(data, output_buffer, output_buffer_size); | |
200 | 233 |
201 // We don't trust the demuxer, so always calculate the duration based on | 234 if (ProducedAudioSamples(decoded_audio_size)) { |
202 // the actual number of samples decoded. | 235 // Copy the audio samples into an output buffer. |
203 base::TimeDelta duration = CalculateDuration(output_buffer_size); | 236 output = new DataBuffer(decoded_audio_size); |
204 result_buffer->SetDuration(duration); | 237 output->SetDataSize(decoded_audio_size); |
238 uint8* data = output->GetWritableData(); | |
239 memcpy(data, decoded_audio_.get(), decoded_audio_size); | |
240 } else if (IsTimestampMarkerPacket(result, input)) { | |
241 // Nothing else to do here but update our estimation. | |
242 estimated_next_timestamp_ = input->GetTimestamp() + input->GetDuration(); | |
243 } else if (IsEndOfStream(result, decoded_audio_size, input)) { | |
244 // Create an end of stream output buffer. | |
245 output = new DataBuffer(0); | |
246 output->SetTimestamp(input->GetTimestamp()); | |
247 output->SetDuration(input->GetDuration()); | |
248 } | |
205 | 249 |
206 // Use an estimated timestamp unless the incoming buffer has a valid one. | 250 DecodeFinished(output, statistics); |
207 if (input->GetTimestamp() == kNoTimestamp) { | 251 } |
208 result_buffer->SetTimestamp(estimated_next_timestamp_); | |
209 | 252 |
210 // Keep the estimated timestamp invalid until we get an incoming buffer | 253 void FFmpegAudioDecoder::DecodeFinished( |
211 // with a valid timestamp. This can happen during seeks, etc... | 254 scoped_refptr<Buffer> output, |
212 if (estimated_next_timestamp_ != kNoTimestamp) { | 255 const PipelineStatistics& statistics) { |
213 estimated_next_timestamp_ += duration; | 256 stats_callback_->Run(statistics); |
214 } | |
215 } else { | |
216 result_buffer->SetTimestamp(input->GetTimestamp()); | |
217 estimated_next_timestamp_ = input->GetTimestamp() + duration; | |
218 } | |
219 | 257 |
220 EnqueueResult(result_buffer); | 258 if (output) { |
221 DecoderBase<AudioDecoder, Buffer>::OnDecodeComplete(statistics); | 259 DCHECK_GT(output_buffers_.size(), 0u); |
260 output_buffers_.pop_front(); | |
261 | |
262 ConsumeAudioSamples(output); | |
263 } | |
264 } | |
265 | |
266 void FFmpegAudioDecoder::ReadFromDemuxerStream() { | |
267 DCHECK(!output_buffers_.empty()) | |
268 << "Reads should only occur if there are output buffers."; | |
269 | |
270 pending_reads_++; | |
acolwell GONE FROM CHROMIUM
2011/05/13 19:35:37
Add stopped check to make sure we don't call the d
| |
271 demuxer_stream_->Read( | |
272 NewCallback(this, &FFmpegAudioDecoder::OnReadComplete)); | |
273 } | |
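
The stopped check acolwell asks for above could be handled along these lines; the |stopped_| flag is hypothetical and not a member introduced by this patch:

    // Sketch only: |stopped_| is an assumed bool member set by Stop() on the
    // decoder thread, preventing reads against a torn-down demuxer stream.
    void FFmpegAudioDecoder::ReadFromDemuxerStream() {
      if (stopped_)
        return;
      DCHECK(!output_buffers_.empty())
          << "Reads should only occur if there are output buffers.";
      pending_reads_++;
      demuxer_stream_->Read(
          NewCallback(this, &FFmpegAudioDecoder::OnReadComplete));
    }
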
274 | |
275 void FFmpegAudioDecoder::OnReadComplete(Buffer* buffer) { | |
276 // TODO(scherkus): change DemuxerStream::Read() to use scoped_refptr<> for | |
277 // callback. | |
278 scoped_refptr<Buffer> ref_buffer(buffer); | |
279 message_loop_->PostTask( | |
280 FROM_HERE, | |
281 NewRunnableMethod(this, &FFmpegAudioDecoder::DoDecodeInputBuffer, | |
acolwell GONE FROM CHROMIUM
2011/05/13 19:35:37
Put this in a message loop guard and copy DoDecod
| |
282 ref_buffer)); | |
283 } | |
284 | |
285 void FFmpegAudioDecoder::UpdateDurationAndTimestamp( | |
286 Buffer* input, | |
287 DataBuffer* output) { | |
288 // Always calculate duration based on the actual number of samples decoded. | |
289 base::TimeDelta duration = CalculateDuration(output->GetDataSize()); | |
290 output->SetDuration(duration); | |
291 | |
292 // Use the incoming timestamp if it's valid. | |
293 if (input->GetTimestamp() != kNoTimestamp) { | |
294 output->SetTimestamp(input->GetTimestamp()); | |
295 estimated_next_timestamp_ = input->GetTimestamp() + duration; | |
222 return; | 296 return; |
223 } | 297 } |
224 | 298 |
225 // We can get a positive result but no decoded data. This is ok because this | 299 // Otherwise use an estimated timestamp and attempt to update the estimation |
226 // this can be a marker packet that only contains timestamp. In this case we | 300 // as long as it's valid. |
227 // save the timestamp for later use. | 301 output->SetTimestamp(estimated_next_timestamp_); |
228 if (result && !input->IsEndOfStream() && | 302 if (estimated_next_timestamp_ != kNoTimestamp) { |
229 input->GetTimestamp() != kNoTimestamp && | 303 estimated_next_timestamp_ += duration; |
230 input->GetDuration() != kNoTimestamp) { | |
231 estimated_next_timestamp_ = input->GetTimestamp() + input->GetDuration(); | |
232 DecoderBase<AudioDecoder, Buffer>::OnDecodeComplete(statistics); | |
233 return; | |
234 } | 304 } |
235 | |
236 // Three conditions to meet to declare end of stream for this decoder: | |
237 // 1. FFmpeg didn't read anything. | |
238 // 2. FFmpeg didn't output anything. | |
239 // 3. An end of stream buffer is received. | |
240 if (result == 0 && output_buffer_size == 0 && input->IsEndOfStream()) { | |
241 DataBuffer* result_buffer = new DataBuffer(0); | |
242 result_buffer->SetTimestamp(input->GetTimestamp()); | |
243 result_buffer->SetDuration(input->GetDuration()); | |
244 EnqueueResult(result_buffer); | |
245 } | |
246 DecoderBase<AudioDecoder, Buffer>::OnDecodeComplete(statistics); | |
247 } | 305 } |
248 | 306 |
249 base::TimeDelta FFmpegAudioDecoder::CalculateDuration(size_t size) { | 307 base::TimeDelta FFmpegAudioDecoder::CalculateDuration(int size) { |
250 int64 denominator = codec_context_->channels * | 308 // XXX: Could we make this function part of AudioDecoderConfig? Something like |
251 av_get_bits_per_sample_fmt(codec_context_->sample_fmt) / 8 * | 309 // "I want a duration for this config based on # of bytes"? |
252 codec_context_->sample_rate; | 310 int64 denominator = config_.channels_per_sample * |
311 config_.bits_per_channel / 8 * config_.sample_rate; | |
253 double microseconds = size / | 312 double microseconds = size / |
254 (denominator / static_cast<double>(base::Time::kMicrosecondsPerSecond)); | 313 (denominator / static_cast<double>(base::Time::kMicrosecondsPerSecond)); |
255 return base::TimeDelta::FromMicroseconds(static_cast<int64>(microseconds)); | 314 return base::TimeDelta::FromMicroseconds(static_cast<int64>(microseconds)); |
256 } | 315 } |
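
The XXX about moving this into AudioDecoderConfig might look something like the sketch below; DurationForBytes() is a hypothetical method name, not an existing media:: API:

    // Hypothetical helper on AudioDecoderConfig (assumption): compute playback
    // duration from a decoded byte count using the config's format fields.
    base::TimeDelta AudioDecoderConfig::DurationForBytes(int bytes) const {
      int64 bytes_per_second =
          channels_per_sample * bits_per_channel / 8 * sample_rate;
      double microseconds = bytes *
          static_cast<double>(base::Time::kMicrosecondsPerSecond) /
          bytes_per_second;
      return base::TimeDelta::FromMicroseconds(static_cast<int64>(microseconds));
    }
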
257 | 316 |
258 } // namespace | 317 } // namespace media |