OLD | NEW |
| (Empty) |
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | |
2 // Use of this source code is governed by a BSD-style license that can be | |
3 // found in the LICENSE file. | |
4 | |
5 #include "media/filters/audio_renderer_base.h" | |
6 | |
7 #include <math.h> | |
8 | |
9 #include "base/bind.h" | |
10 #include "base/callback.h" | |
11 #include "base/callback_helpers.h" | |
12 #include "base/logging.h" | |
13 #include "media/base/filter_host.h" | |
14 #include "media/audio/audio_util.h" | |
15 | |
16 namespace media { | |
17 | |
// Constructs the renderer around |sink|, the object that performs actual
// audio output. All state starts uninitialized; Initialize() must run before
// any other operation. |read_cb_| is bound once here and reused for every
// decoder read; base::Unretained is safe because reads are only issued while
// |this| is alive (the destructor DCHECKs that Stop() already ran).
AudioRendererBase::AudioRendererBase(media::AudioRendererSink* sink)
    : state_(kUninitialized),
      pending_read_(false),
      received_end_of_stream_(false),
      rendered_end_of_stream_(false),
      bytes_per_frame_(0),
      bytes_per_second_(0),
      stopped_(false),
      sink_(sink),
      is_initialized_(false),
      read_cb_(base::Bind(&AudioRendererBase::DecodedAudioReady,
                          base::Unretained(this))) {
}
31 | |
// Destruction is only legal before Initialize() or after Stop(); Stop() is
// responsible for tearing down |algorithm_| and the sink.
AudioRendererBase::~AudioRendererBase() {
  // Stop() should have been called and |algorithm_| should have been destroyed.
  DCHECK(state_ == kUninitialized || state_ == kStopped);
  DCHECK(!algorithm_.get());
}
37 | |
38 void AudioRendererBase::Play(const base::Closure& callback) { | |
39 { | |
40 base::AutoLock auto_lock(lock_); | |
41 DCHECK_EQ(kPaused, state_); | |
42 state_ = kPlaying; | |
43 callback.Run(); | |
44 } | |
45 | |
46 if (stopped_) | |
47 return; | |
48 | |
49 if (GetPlaybackRate() != 0.0f) { | |
50 DoPlay(); | |
51 } else { | |
52 DoPause(); | |
53 } | |
54 } | |
55 | |
56 void AudioRendererBase::DoPlay() { | |
57 earliest_end_time_ = base::Time::Now(); | |
58 DCHECK(sink_.get()); | |
59 sink_->Play(); | |
60 } | |
61 | |
62 void AudioRendererBase::Pause(const base::Closure& callback) { | |
63 { | |
64 base::AutoLock auto_lock(lock_); | |
65 DCHECK(state_ == kPlaying || state_ == kUnderflow || | |
66 state_ == kRebuffering); | |
67 pause_cb_ = callback; | |
68 state_ = kPaused; | |
69 | |
70 // Pause only when we've completed our pending read. | |
71 if (!pending_read_) { | |
72 pause_cb_.Run(); | |
73 pause_cb_.Reset(); | |
74 } | |
75 } | |
76 | |
77 if (stopped_) | |
78 return; | |
79 | |
80 DoPause(); | |
81 } | |
82 | |
// Pauses the sink. The |false| argument requests no flush of buffered audio
// (contrast with DoSeek(), which passes true) — presumably so resuming after
// pause continues seamlessly; confirm against AudioRendererSink::Pause().
void AudioRendererBase::DoPause() {
  DCHECK(sink_.get());
  sink_->Pause(false);
}
87 | |
// Flushing is delegated entirely to the decoder; |callback| runs when the
// decoder's Reset() completes. Renderer-side buffers are flushed separately
// via |algorithm_| in Seek().
void AudioRendererBase::Flush(const base::Closure& callback) {
  decoder_->Reset(callback);
}
91 | |
92 void AudioRendererBase::Stop(const base::Closure& callback) { | |
93 if (!stopped_) { | |
94 DCHECK(sink_.get()); | |
95 sink_->Stop(); | |
96 | |
97 stopped_ = true; | |
98 } | |
99 { | |
100 base::AutoLock auto_lock(lock_); | |
101 state_ = kStopped; | |
102 algorithm_.reset(NULL); | |
103 time_cb_.Reset(); | |
104 underflow_cb_.Reset(); | |
105 } | |
106 if (!callback.is_null()) { | |
107 callback.Run(); | |
108 } | |
109 } | |
110 | |
// Begins a seek to |time|. Callers must already be paused with no pending
// decoder read. |cb| is stashed in |seek_cb_| and run with PIPELINE_OK by
// DecodedAudioReady() once enough data is buffered at the new position.
void AudioRendererBase::Seek(base::TimeDelta time, const PipelineStatusCB& cb) {
  base::AutoLock auto_lock(lock_);
  DCHECK_EQ(kPaused, state_);
  DCHECK(!pending_read_) << "Pending read must complete before seeking";
  DCHECK(pause_cb_.is_null());
  DCHECK(seek_cb_.is_null());
  state_ = kSeeking;
  seek_cb_ = cb;
  seek_timestamp_ = time;

  // Throw away everything and schedule our reads.
  audio_time_buffered_ = base::TimeDelta();
  received_end_of_stream_ = false;
  rendered_end_of_stream_ = false;

  // |algorithm_| will request more reads.
  algorithm_->FlushBuffers();

  // Once stopped there is no sink to re-position.
  if (stopped_)
    return;

  DoSeek();
}
134 | |
135 void AudioRendererBase::DoSeek() { | |
136 earliest_end_time_ = base::Time::Now(); | |
137 | |
138 // Pause and flush the stream when we seek to a new location. | |
139 sink_->Pause(true); | |
140 } | |
141 | |
142 void AudioRendererBase::Initialize(const scoped_refptr<AudioDecoder>& decoder, | |
143 const PipelineStatusCB& init_cb, | |
144 const base::Closure& underflow_cb, | |
145 const TimeCB& time_cb) { | |
146 DCHECK(decoder); | |
147 DCHECK(!init_cb.is_null()); | |
148 DCHECK(!underflow_cb.is_null()); | |
149 DCHECK(!time_cb.is_null()); | |
150 DCHECK_EQ(kUninitialized, state_); | |
151 decoder_ = decoder; | |
152 underflow_cb_ = underflow_cb; | |
153 time_cb_ = time_cb; | |
154 | |
155 // Create a callback so our algorithm can request more reads. | |
156 base::Closure cb = base::Bind(&AudioRendererBase::ScheduleRead_Locked, this); | |
157 | |
158 // Construct the algorithm. | |
159 algorithm_.reset(new AudioRendererAlgorithmBase()); | |
160 | |
161 // Initialize our algorithm with media properties, initial playback rate, | |
162 // and a callback to request more reads from the data source. | |
163 ChannelLayout channel_layout = decoder_->channel_layout(); | |
164 int channels = ChannelLayoutToChannelCount(channel_layout); | |
165 int bits_per_channel = decoder_->bits_per_channel(); | |
166 int sample_rate = decoder_->samples_per_second(); | |
167 // TODO(vrk): Add method to AudioDecoder to compute bytes per frame. | |
168 bytes_per_frame_ = channels * bits_per_channel / 8; | |
169 | |
170 bool config_ok = algorithm_->ValidateConfig(channels, sample_rate, | |
171 bits_per_channel); | |
172 if (!config_ok || is_initialized_) { | |
173 init_cb.Run(PIPELINE_ERROR_INITIALIZATION_FAILED); | |
174 return; | |
175 } | |
176 | |
177 if (config_ok) | |
178 algorithm_->Initialize(channels, sample_rate, bits_per_channel, 0.0f, cb); | |
179 | |
180 // We use the AUDIO_PCM_LINEAR flag because AUDIO_PCM_LOW_LATENCY | |
181 // does not currently support all the sample-rates that we require. | |
182 // Please see: http://code.google.com/p/chromium/issues/detail?id=103627 | |
183 // for more details. | |
184 audio_parameters_ = AudioParameters( | |
185 AudioParameters::AUDIO_PCM_LINEAR, channel_layout, sample_rate, | |
186 bits_per_channel, GetHighLatencyOutputBufferSize(sample_rate)); | |
187 | |
188 bytes_per_second_ = audio_parameters_.GetBytesPerSecond(); | |
189 | |
190 DCHECK(sink_.get()); | |
191 DCHECK(!is_initialized_); | |
192 | |
193 sink_->Initialize(audio_parameters_, this); | |
194 | |
195 sink_->Start(); | |
196 is_initialized_ = true; | |
197 | |
198 // Finally, execute the start callback. | |
199 state_ = kPaused; | |
200 init_cb.Run(PIPELINE_OK); | |
201 } | |
202 | |
// Returns true once the end-of-stream buffer has both been received from the
// decoder and rendered out through the sink (see SignalEndOfStream()).
bool AudioRendererBase::HasEnded() {
  base::AutoLock auto_lock(lock_);
  // Having rendered end-of-stream implies |algorithm_| has run dry.
  DCHECK(!rendered_end_of_stream_ || algorithm_->NeedsMoreData());

  return received_end_of_stream_ && rendered_end_of_stream_;
}
209 | |
210 void AudioRendererBase::ResumeAfterUnderflow(bool buffer_more_audio) { | |
211 base::AutoLock auto_lock(lock_); | |
212 if (state_ == kUnderflow) { | |
213 if (buffer_more_audio) | |
214 algorithm_->IncreaseQueueCapacity(); | |
215 | |
216 state_ = kRebuffering; | |
217 } | |
218 } | |
219 | |
220 void AudioRendererBase::SetVolume(float volume) { | |
221 if (stopped_) | |
222 return; | |
223 sink_->SetVolume(volume); | |
224 } | |
225 | |
// Decoder read completion (bound as |read_cb_|). Routes the decoded |buffer|
// according to the current state: enqueue it, finish a deferred pause or an
// in-progress seek, or drop it outright once stopped. |buffer| may be null
// or an end-of-stream marker.
void AudioRendererBase::DecodedAudioReady(scoped_refptr<Buffer> buffer) {
  base::AutoLock auto_lock(lock_);
  DCHECK(state_ == kPaused || state_ == kSeeking || state_ == kPlaying ||
         state_ == kUnderflow || state_ == kRebuffering || state_ == kStopped);

  CHECK(pending_read_);
  pending_read_ = false;

  if (buffer && buffer->IsEndOfStream()) {
    received_end_of_stream_ = true;

    // Transition to kPlaying if we are currently handling an underflow since
    // no more data will be arriving.
    if (state_ == kUnderflow || state_ == kRebuffering)
      state_ = kPlaying;
  }

  switch (state_) {
    case kUninitialized:
      NOTREACHED();
      return;
    case kPaused:
      // Queue the data, then run the pause callback Pause() deferred while
      // this read was outstanding.
      if (buffer && !buffer->IsEndOfStream())
        algorithm_->EnqueueBuffer(buffer);
      DCHECK(!pending_read_);
      base::ResetAndReturn(&pause_cb_).Run();
      return;
    case kSeeking:
      // Discard buffers that end before the seek target and keep reading.
      if (IsBeforeSeekTime(buffer)) {
        ScheduleRead_Locked();
        return;
      }
      if (buffer && !buffer->IsEndOfStream()) {
        algorithm_->EnqueueBuffer(buffer);
        // Keep buffering until the queue is full before finishing the seek.
        if (!algorithm_->IsQueueFull())
          return;
      }
      // Queue full or end-of-stream reached: the seek is complete.
      state_ = kPaused;
      base::ResetAndReturn(&seek_cb_).Run(PIPELINE_OK);
      return;
    case kPlaying:
    case kUnderflow:
    case kRebuffering:
      if (buffer && !buffer->IsEndOfStream())
        algorithm_->EnqueueBuffer(buffer);
      return;
    case kStopped:
      return;
  }
}
276 | |
277 void AudioRendererBase::SignalEndOfStream() { | |
278 DCHECK(received_end_of_stream_); | |
279 if (!rendered_end_of_stream_) { | |
280 rendered_end_of_stream_ = true; | |
281 host()->NotifyEnded(); | |
282 } | |
283 } | |
284 | |
// Issues a single decoder read unless one is already outstanding or we are
// paused. Must be called with |lock_| held; the completion arrives on
// DecodedAudioReady() via |read_cb_|.
void AudioRendererBase::ScheduleRead_Locked() {
  lock_.AssertAcquired();
  if (pending_read_ || state_ == kPaused)
    return;
  pending_read_ = true;
  decoder_->Read(read_cb_);
}
292 | |
// Applies |playback_rate| to the sink and |algorithm_|, starting or pausing
// the sink when the rate crosses zero. Negative rates are disallowed.
void AudioRendererBase::SetPlaybackRate(float playback_rate) {
  DCHECK_LE(0.0f, playback_rate);

  if (!stopped_) {
    // Notify sink of new playback rate.
    sink_->SetPlaybackRate(playback_rate);

    // We have two cases here:
    // Play: GetPlaybackRate() == 0.0 && playback_rate != 0.0
    // Pause: GetPlaybackRate() != 0.0 && playback_rate == 0.0
    //
    // NOTE: GetPlaybackRate() still reports the *old* rate at this point —
    // |algorithm_| is only updated below — which is what makes the
    // zero-crossing comparison work. Do not reorder.
    if (GetPlaybackRate() == 0.0f && playback_rate != 0.0f) {
      DoPlay();
    } else if (GetPlaybackRate() != 0.0f && playback_rate == 0.0f) {
      // Pause is easy, we can always pause.
      DoPause();
    }
  }

  base::AutoLock auto_lock(lock_);
  algorithm_->SetPlaybackRate(playback_rate);
}
314 | |
// Returns the current playback rate as held by |algorithm_| (the single
// source of truth for rate; see SetPlaybackRate()).
float AudioRendererBase::GetPlaybackRate() {
  base::AutoLock auto_lock(lock_);
  return algorithm_->playback_rate();
}
319 | |
320 bool AudioRendererBase::IsBeforeSeekTime(const scoped_refptr<Buffer>& buffer) { | |
321 return (state_ == kSeeking) && buffer && !buffer->IsEndOfStream() && | |
322 (buffer->GetTimestamp() + buffer->GetDuration()) < seek_timestamp_; | |
323 } | |
324 | |
// AudioRendererSink::RenderCallback implementation. Produces up to
// |number_of_frames| frames of audio into |audio_data| (one float pointer
// per channel) given the device's current delay. Returns the number of
// frames actually filled; the remainder of each channel is zeroed.
int AudioRendererBase::Render(const std::vector<float*>& audio_data,
                              int number_of_frames,
                              int audio_delay_milliseconds) {
  if (stopped_ || GetPlaybackRate() == 0.0f) {
    // Output silence if stopped.
    for (size_t i = 0; i < audio_data.size(); ++i)
      memset(audio_data[i], 0, sizeof(float) * number_of_frames);
    return 0;
  }

  // Adjust the playback delay.
  base::TimeDelta request_delay =
      base::TimeDelta::FromMilliseconds(audio_delay_milliseconds);

  // Finally we need to adjust the delay according to playback rate.
  if (GetPlaybackRate() != 1.0f) {
    request_delay = base::TimeDelta::FromMicroseconds(
        static_cast<int64>(ceil(request_delay.InMicroseconds() *
                                GetPlaybackRate())));
  }

  int bytes_per_frame = audio_parameters_.GetBytesPerFrame();

  // Fill a temporary interleaved buffer, then split it per channel below.
  const int buf_size = number_of_frames * bytes_per_frame;
  scoped_array<uint8> buf(new uint8[buf_size]);

  int frames_filled = FillBuffer(buf.get(), number_of_frames, request_delay);
  int bytes_filled = frames_filled * bytes_per_frame;
  DCHECK_LE(bytes_filled, buf_size);
  // Push out the earliest possible end-of-stream time based on what we just
  // queued (see FillBuffer()'s end-of-stream polling).
  UpdateEarliestEndTime(bytes_filled, request_delay, base::Time::Now());

  // Deinterleave each audio channel.
  int channels = audio_data.size();
  for (int channel_index = 0; channel_index < channels; ++channel_index) {
    media::DeinterleaveAudioChannel(buf.get(),
                                    audio_data[channel_index],
                                    channels,
                                    channel_index,
                                    bytes_per_frame / channels,
                                    frames_filled);

    // If FillBuffer() didn't give us enough data then zero out the remainder.
    if (frames_filled < number_of_frames) {
      int frames_to_zero = number_of_frames - frames_filled;
      memset(audio_data[channel_index] + frames_filled,
             0,
             sizeof(float) * frames_to_zero);
    }
  }
  return frames_filled;
}
376 | |
// Fills |dest| with up to |requested_frames| frames of interleaved audio
// from |algorithm_|, handling end-of-stream detection and underflow
// transitions. |playback_delay| is how much audio the device already has
// buffered. Returns the number of frames written.
uint32 AudioRendererBase::FillBuffer(uint8* dest,
                                     uint32 requested_frames,
                                     const base::TimeDelta& playback_delay) {
  // The |audio_time_buffered_| is the ending timestamp of the last frame
  // buffered at the audio device. |playback_delay| is the amount of time
  // buffered at the audio device. The current time can be computed by their
  // difference.
  base::TimeDelta current_time = audio_time_buffered_ - playback_delay;

  size_t frames_written = 0;
  // Copied out under the lock, run after releasing it (see bottom).
  base::Closure underflow_cb;
  {
    base::AutoLock auto_lock(lock_);

    // Rebuffering is complete once the algorithm's queue refills.
    if (state_ == kRebuffering && algorithm_->IsQueueFull())
      state_ = kPlaying;

    // Mute audio by returning 0 when not playing.
    if (state_ != kPlaying) {
      // TODO(scherkus): To keep the audio hardware busy we write at most 8k of
      // zeros. This gets around the tricky situation of pausing and resuming
      // the audio IPC layer in Chrome. Ideally, we should return zero and then
      // the subclass can restart the conversation.
      //
      // This should get handled by the subclass http://crbug.com/106600
      const uint32 kZeroLength = 8192;
      size_t zeros_to_write =
          std::min(kZeroLength, requested_frames * bytes_per_frame_);
      memset(dest, 0, zeros_to_write);
      return zeros_to_write / bytes_per_frame_;
    }

    // Use three conditions to determine the end of playback:
    // 1. Algorithm needs more audio data.
    // 2. We've received an end of stream buffer.
    //    (received_end_of_stream_ == true)
    // 3. Browser process has no audio data being played.
    //    There is no way to check that condition that would work for all
    //    derived classes, so call virtual method that would either render
    //    end of stream or schedule such rendering.
    //
    // Three conditions determine when an underflow occurs:
    // 1. Algorithm has no audio data.
    // 2. Currently in the kPlaying state.
    // 3. Have not received an end of stream buffer.
    if (algorithm_->NeedsMoreData()) {
      if (received_end_of_stream_) {
        // TODO(enal): schedule callback instead of polling.
        if (base::Time::Now() >= earliest_end_time_)
          SignalEndOfStream();
      } else if (state_ == kPlaying) {
        state_ = kUnderflow;
        underflow_cb = underflow_cb_;
      }
    } else {
      // Otherwise fill the buffer.
      frames_written = algorithm_->FillBuffer(dest, requested_frames);
    }
  }

  // NOTE(review): everything below runs after |lock_| is released yet touches
  // |algorithm_| and |audio_time_buffered_| — confirm this is safe against a
  // concurrent Stop() resetting |algorithm_|.
  base::TimeDelta previous_time_buffered = audio_time_buffered_;
  // The call to FillBuffer() on |algorithm_| has increased the amount of
  // buffered audio data. Update the new amount of time buffered.
  audio_time_buffered_ = algorithm_->GetTime();

  // Report time only when it actually advanced, to avoid redundant updates.
  if (previous_time_buffered.InMicroseconds() > 0 &&
      (previous_time_buffered != audio_time_buffered_ ||
       current_time > host()->GetTime())) {
    time_cb_.Run(current_time, audio_time_buffered_);
  }

  // Run the underflow callback outside the lock to avoid re-entrancy issues.
  if (!underflow_cb.is_null())
    underflow_cb.Run();

  return frames_written;
}
453 | |
454 void AudioRendererBase::UpdateEarliestEndTime(int bytes_filled, | |
455 base::TimeDelta request_delay, | |
456 base::Time time_now) { | |
457 if (bytes_filled != 0) { | |
458 base::TimeDelta predicted_play_time = ConvertToDuration(bytes_filled); | |
459 float playback_rate = GetPlaybackRate(); | |
460 if (playback_rate != 1.0f) { | |
461 predicted_play_time = base::TimeDelta::FromMicroseconds( | |
462 static_cast<int64>(ceil(predicted_play_time.InMicroseconds() * | |
463 playback_rate))); | |
464 } | |
465 earliest_end_time_ = | |
466 std::max(earliest_end_time_, | |
467 time_now + request_delay + predicted_play_time); | |
468 } | |
469 } | |
470 | |
471 base::TimeDelta AudioRendererBase::ConvertToDuration(int bytes) { | |
472 if (bytes_per_second_) { | |
473 return base::TimeDelta::FromMicroseconds( | |
474 base::Time::kMicrosecondsPerSecond * bytes / bytes_per_second_); | |
475 } | |
476 return base::TimeDelta(); | |
477 } | |
478 | |
// AudioRendererSink::RenderCallback implementation. A sink-side render error
// is treated as unrecoverable: ask the host to disable audio rendering.
void AudioRendererBase::OnRenderError() {
  host()->DisableAudioRenderer();
}
482 | |
483 } // namespace media | |
OLD | NEW |