OLD | NEW |
1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "content/renderer/media/audio_renderer_impl.h" | 5 #include "content/renderer/media/audio_renderer_impl.h" |
6 | 6 |
7 #include <math.h> | 7 #include <math.h> |
8 | 8 |
9 #include <algorithm> | 9 #include <algorithm> |
10 | 10 |
11 #include "base/bind.h" | 11 #include "base/bind.h" |
12 #include "base/command_line.h" | |
13 #include "content/common/child_process.h" | 12 #include "content/common/child_process.h" |
14 #include "content/common/media/audio_messages.h" | 13 #include "content/common/media/audio_messages.h" |
15 #include "content/public/common/content_switches.h" | |
16 #include "content/renderer/render_thread_impl.h" | 14 #include "content/renderer/render_thread_impl.h" |
17 #include "media/audio/audio_buffers_state.h" | 15 #include "media/audio/audio_buffers_state.h" |
18 #include "media/audio/audio_output_controller.h" | |
19 #include "media/audio/audio_util.h" | 16 #include "media/audio/audio_util.h" |
20 #include "media/base/filter_host.h" | |
21 | 17 |
22 // Static variable that says what code path we are using -- low or high | 18 // We define GetBufferSizeForSampleRate() instead of using |
23 // latency. Made separate variable so we don't have to go to command line | 19 // GetAudioHardwareBufferSize() in audio_util because we're using |
24 // for every DCHECK(). | 20 // the AUDIO_PCM_LINEAR flag, instead of AUDIO_PCM_LOW_LATENCY, |
25 AudioRendererImpl::LatencyType AudioRendererImpl::latency_type_ = | 21 // which the audio_util functions assume. |
26 AudioRendererImpl::kUninitializedLatency; | 22 // |
| 23 // See: http://code.google.com/p/chromium/issues/detail?id=103627 |
| 24 // for a more detailed description of the subtleties. |
| 25 static size_t GetBufferSizeForSampleRate(int sample_rate) { |
| 26 // kNominalBufferSize has been tested on Windows, Mac OS X, and Linux |
| 27 // using the low-latency audio codepath (SyncSocket implementation) |
| 28 // with the AUDIO_PCM_LINEAR flag. |
| 29 const size_t kNominalBufferSize = 2048; |
| 30 |
| 31 if (sample_rate <= 48000) |
| 32 return kNominalBufferSize; |
| 33 else if (sample_rate <= 96000) |
| 34 return kNominalBufferSize * 2; |
| 35 return kNominalBufferSize * 4; |
| 36 } |
27 | 37 |
28 AudioRendererImpl::AudioRendererImpl() | 38 AudioRendererImpl::AudioRendererImpl() |
29 : AudioRendererBase(), | 39 : AudioRendererBase(), |
30 bytes_per_second_(0), | 40 bytes_per_second_(0), |
31 stream_id_(0), | 41 stopped_(false) { |
32 shared_memory_(NULL), | 42 // We create the AudioDevice here because it must be created in the |
33 shared_memory_size_(0), | 43 // main thread. But we don't yet know the audio format (sample-rate, etc.) |
34 stopped_(false), | 44 // at this point. Later, when OnInitialize() is called, we have |
35 pending_request_(false) { | 45 // the audio format information and call the AudioDevice::Initialize() |
36 filter_ = RenderThreadImpl::current()->audio_message_filter(); | 46 // method to fully initialize it. |
37 // Figure out if we are planning to use high or low latency code path. | 47 audio_device_ = new AudioDevice(); |
38 // We are initializing only one variable and double initialization is Ok, | |
39 // so there would not be any issues caused by CPU memory model. | |
40 if (latency_type_ == kUninitializedLatency) { | |
41 // Urgent workaround for | |
42 // http://code.google.com/p/chromium-os/issues/detail?id=21491 | |
43 // TODO(enal): Fix it properly. | |
44 #if defined(OS_CHROMEOS) | |
45 latency_type_ = kHighLatency; | |
46 #else | |
47 if (!CommandLine::ForCurrentProcess()->HasSwitch( | |
48 switches::kHighLatencyAudio)) { | |
49 latency_type_ = kLowLatency; | |
50 } else { | |
51 latency_type_ = kHighLatency; | |
52 } | |
53 #endif | |
54 } | |
55 } | 48 } |
56 | 49 |
57 AudioRendererImpl::~AudioRendererImpl() { | 50 AudioRendererImpl::~AudioRendererImpl() { |
58 } | 51 } |
59 | 52 |
60 // static | |
61 void AudioRendererImpl::set_latency_type(LatencyType latency_type) { | |
62 DCHECK_EQ(kUninitializedLatency, latency_type_); | |
63 latency_type_ = latency_type; | |
64 } | |
65 | |
66 base::TimeDelta AudioRendererImpl::ConvertToDuration(int bytes) { | 53 base::TimeDelta AudioRendererImpl::ConvertToDuration(int bytes) { |
67 if (bytes_per_second_) { | 54 if (bytes_per_second_) { |
68 return base::TimeDelta::FromMicroseconds( | 55 return base::TimeDelta::FromMicroseconds( |
69 base::Time::kMicrosecondsPerSecond * bytes / bytes_per_second_); | 56 base::Time::kMicrosecondsPerSecond * bytes / bytes_per_second_); |
70 } | 57 } |
71 return base::TimeDelta(); | 58 return base::TimeDelta(); |
72 } | 59 } |
73 | 60 |
74 void AudioRendererImpl::UpdateEarliestEndTime(int bytes_filled, | 61 void AudioRendererImpl::UpdateEarliestEndTime(int bytes_filled, |
75 base::TimeDelta request_delay, | 62 base::TimeDelta request_delay, |
76 base::Time time_now) { | 63 base::Time time_now) { |
77 if (bytes_filled != 0) { | 64 if (bytes_filled != 0) { |
78 base::TimeDelta predicted_play_time = ConvertToDuration(bytes_filled); | 65 base::TimeDelta predicted_play_time = ConvertToDuration(bytes_filled); |
79 float playback_rate = GetPlaybackRate(); | 66 float playback_rate = GetPlaybackRate(); |
80 if (playback_rate != 1.0f) { | 67 if (playback_rate != 1.0f) { |
81 predicted_play_time = base::TimeDelta::FromMicroseconds( | 68 predicted_play_time = base::TimeDelta::FromMicroseconds( |
82 static_cast<int64>(ceil(predicted_play_time.InMicroseconds() * | 69 static_cast<int64>(ceil(predicted_play_time.InMicroseconds() * |
83 playback_rate))); | 70 playback_rate))); |
84 } | 71 } |
85 earliest_end_time_ = | 72 earliest_end_time_ = |
86 std::max(earliest_end_time_, | 73 std::max(earliest_end_time_, |
87 time_now + request_delay + predicted_play_time); | 74 time_now + request_delay + predicted_play_time); |
88 } | 75 } |
89 } | 76 } |
90 | 77 |
91 bool AudioRendererImpl::OnInitialize(int bits_per_channel, | 78 bool AudioRendererImpl::OnInitialize(int bits_per_channel, |
92 ChannelLayout channel_layout, | 79 ChannelLayout channel_layout, |
93 int sample_rate) { | 80 int sample_rate) { |
94 AudioParameters params(AudioParameters::AUDIO_PCM_LINEAR, channel_layout, | 81 // We use the AUDIO_PCM_LINEAR flag because AUDIO_PCM_LOW_LATENCY |
95 sample_rate, bits_per_channel, 0); | 82 // does not currently support all the sample-rates that we require. |
| 83 // Please see: http://code.google.com/p/chromium/issues/detail?id=103627 |
| 84 // for more details. |
| 85 audio_parameters_ = AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, |
| 86 channel_layout, |
| 87 sample_rate, |
| 88 bits_per_channel, |
| 89 0); |
96 | 90 |
97 bytes_per_second_ = params.GetBytesPerSecond(); | 91 bytes_per_second_ = audio_parameters_.GetBytesPerSecond(); |
98 | 92 |
99 ChildProcess::current()->io_message_loop()->PostTask( | 93 DCHECK(audio_device_.get()); |
100 FROM_HERE, | 94 |
101 base::Bind(&AudioRendererImpl::CreateStreamTask, this, params)); | 95 if (!audio_device_->IsInitialized()) { |
| 96 audio_device_->Initialize( |
| 97 GetBufferSizeForSampleRate(sample_rate), |
| 98 audio_parameters_.channels, |
| 99 audio_parameters_.sample_rate, |
| 100 audio_parameters_.format, |
| 101 this); |
| 102 |
| 103 audio_device_->Start(); |
| 104 } |
| 105 |
102 return true; | 106 return true; |
103 } | 107 } |
104 | 108 |
105 void AudioRendererImpl::OnStop() { | 109 void AudioRendererImpl::OnStop() { |
106 // Since joining with the audio thread can acquire lock_, we make sure to | 110 if (stopped_) |
107 // Join() with it not under lock. | 111 return; |
108 base::DelegateSimpleThread* audio_thread = NULL; | |
109 { | |
110 base::AutoLock auto_lock(lock_); | |
111 if (stopped_) | |
112 return; | |
113 stopped_ = true; | |
114 | 112 |
115 DCHECK_EQ(!audio_thread_.get(), !socket_.get()); | 113 DCHECK(audio_device_.get()); |
116 if (socket_.get()) | 114 audio_device_->Stop(); |
117 socket_->Close(); | |
118 if (audio_thread_.get()) | |
119 audio_thread = audio_thread_.get(); | |
120 | 115 |
121 ChildProcess::current()->io_message_loop()->PostTask( | 116 stopped_ = true; |
122 FROM_HERE, | |
123 base::Bind(&AudioRendererImpl::DestroyTask, this)); | |
124 } | |
125 | |
126 if (audio_thread) | |
127 audio_thread->Join(); | |
128 } | |
129 | |
130 void AudioRendererImpl::NotifyDataAvailableIfNecessary() { | |
131 if (latency_type_ == kHighLatency) { | |
132 // Post a task to render thread to notify a packet reception. | |
133 ChildProcess::current()->io_message_loop()->PostTask( | |
134 FROM_HERE, | |
135 base::Bind(&AudioRendererImpl::NotifyPacketReadyTask, this)); | |
136 } | |
137 } | 117 } |
138 | 118 |
139 void AudioRendererImpl::ConsumeAudioSamples( | 119 void AudioRendererImpl::ConsumeAudioSamples( |
140 scoped_refptr<media::Buffer> buffer_in) { | 120 scoped_refptr<media::Buffer> buffer_in) { |
141 base::AutoLock auto_lock(lock_); | |
142 if (stopped_) | 121 if (stopped_) |
143 return; | 122 return; |
144 | 123 |
145 // TODO(hclam): handle end of stream here. | 124 // TODO(hclam): handle end of stream here. |
146 | 125 |
147 // Use the base class to queue the buffer. | 126 // Use the base class to queue the buffer. |
148 AudioRendererBase::ConsumeAudioSamples(buffer_in); | 127 AudioRendererBase::ConsumeAudioSamples(buffer_in); |
149 | |
150 NotifyDataAvailableIfNecessary(); | |
151 } | 128 } |
152 | 129 |
153 void AudioRendererImpl::SetPlaybackRate(float rate) { | 130 void AudioRendererImpl::SetPlaybackRate(float rate) { |
154 DCHECK_LE(0.0f, rate); | 131 DCHECK_LE(0.0f, rate); |
155 | 132 |
156 base::AutoLock auto_lock(lock_); | |
157 // Handle the case where we stopped due to IO message loop dying. | 133 // Handle the case where we stopped due to IO message loop dying. |
158 if (stopped_) { | 134 if (stopped_) { |
159 AudioRendererBase::SetPlaybackRate(rate); | 135 AudioRendererBase::SetPlaybackRate(rate); |
160 return; | 136 return; |
161 } | 137 } |
162 | 138 |
163 // We have two cases here: | 139 // We have two cases here: |
164 // Play: GetPlaybackRate() == 0.0 && rate != 0.0 | 140 // Play: GetPlaybackRate() == 0.0 && rate != 0.0 |
165 // Pause: GetPlaybackRate() != 0.0 && rate == 0.0 | 141 // Pause: GetPlaybackRate() != 0.0 && rate == 0.0 |
166 if (GetPlaybackRate() == 0.0f && rate != 0.0f) { | 142 if (GetPlaybackRate() == 0.0f && rate != 0.0f) { |
167 ChildProcess::current()->io_message_loop()->PostTask( | 143 DoPlay(); |
168 FROM_HERE, | |
169 base::Bind(&AudioRendererImpl::PlayTask, this)); | |
170 } else if (GetPlaybackRate() != 0.0f && rate == 0.0f) { | 144 } else if (GetPlaybackRate() != 0.0f && rate == 0.0f) { |
171 // Pause is easy, we can always pause. | 145 // Pause is easy, we can always pause. |
172 ChildProcess::current()->io_message_loop()->PostTask( | 146 DoPause(); |
173 FROM_HERE, | |
174 base::Bind(&AudioRendererImpl::PauseTask, this)); | |
175 } | 147 } |
176 AudioRendererBase::SetPlaybackRate(rate); | 148 AudioRendererBase::SetPlaybackRate(rate); |
177 | |
178 // If we are playing, give a kick to try fulfilling the packet request as | |
179 // the previous packet request may be stalled by a pause. | |
180 if (rate > 0.0f) { | |
181 NotifyDataAvailableIfNecessary(); | |
182 } | |
183 } | 149 } |
184 | 150 |
185 void AudioRendererImpl::Pause(const base::Closure& callback) { | 151 void AudioRendererImpl::Pause(const base::Closure& callback) { |
186 AudioRendererBase::Pause(callback); | 152 AudioRendererBase::Pause(callback); |
187 base::AutoLock auto_lock(lock_); | |
188 if (stopped_) | 153 if (stopped_) |
189 return; | 154 return; |
190 | 155 |
191 ChildProcess::current()->io_message_loop()->PostTask( | 156 DoPause(); |
192 FROM_HERE, | |
193 base::Bind(&AudioRendererImpl::PauseTask, this)); | |
194 } | 157 } |
195 | 158 |
196 void AudioRendererImpl::Seek(base::TimeDelta time, | 159 void AudioRendererImpl::Seek(base::TimeDelta time, |
197 const media::FilterStatusCB& cb) { | 160 const media::FilterStatusCB& cb) { |
198 AudioRendererBase::Seek(time, cb); | 161 AudioRendererBase::Seek(time, cb); |
199 base::AutoLock auto_lock(lock_); | |
200 if (stopped_) | 162 if (stopped_) |
201 return; | 163 return; |
202 | 164 |
203 ChildProcess::current()->io_message_loop()->PostTask( | 165 DoSeek(); |
204 FROM_HERE, | |
205 base::Bind(&AudioRendererImpl::SeekTask, this)); | |
206 } | 166 } |
207 | 167 |
208 | |
209 void AudioRendererImpl::Play(const base::Closure& callback) { | 168 void AudioRendererImpl::Play(const base::Closure& callback) { |
210 AudioRendererBase::Play(callback); | 169 AudioRendererBase::Play(callback); |
211 base::AutoLock auto_lock(lock_); | |
212 if (stopped_) | 170 if (stopped_) |
213 return; | 171 return; |
214 | 172 |
215 if (GetPlaybackRate() != 0.0f) { | 173 if (GetPlaybackRate() != 0.0f) { |
216 ChildProcess::current()->io_message_loop()->PostTask( | 174 DoPlay(); |
217 FROM_HERE, | |
218 base::Bind(&AudioRendererImpl::PlayTask, this)); | |
219 } else { | 175 } else { |
220 ChildProcess::current()->io_message_loop()->PostTask( | 176 DoPause(); |
221 FROM_HERE, | |
222 base::Bind(&AudioRendererImpl::PauseTask, this)); | |
223 } | 177 } |
224 } | 178 } |
225 | 179 |
226 void AudioRendererImpl::SetVolume(float volume) { | 180 void AudioRendererImpl::SetVolume(float volume) { |
227 base::AutoLock auto_lock(lock_); | |
228 if (stopped_) | 181 if (stopped_) |
229 return; | 182 return; |
230 ChildProcess::current()->io_message_loop()->PostTask( | 183 DCHECK(audio_device_.get()); |
231 FROM_HERE, | 184 audio_device_->SetVolume(volume); |
232 base::Bind(&AudioRendererImpl::SetVolumeTask, this, volume)); | |
233 } | 185 } |
234 | 186 |
235 void AudioRendererImpl::OnCreated(base::SharedMemoryHandle handle, | 187 void AudioRendererImpl::DoPlay() { |
236 uint32 length) { | 188 earliest_end_time_ = base::Time::Now(); |
237 DCHECK(MessageLoop::current() == ChildProcess::current()->io_message_loop()); | 189 DCHECK(audio_device_.get()); |
238 DCHECK_EQ(kHighLatency, latency_type_); | 190 audio_device_->Play(); |
239 | |
240 base::AutoLock auto_lock(lock_); | |
241 if (stopped_) | |
242 return; | |
243 | |
244 shared_memory_.reset(new base::SharedMemory(handle, false)); | |
245 shared_memory_->Map(length); | |
246 shared_memory_size_ = length; | |
247 } | 191 } |
248 | 192 |
249 void AudioRendererImpl::CreateSocket(base::SyncSocket::Handle socket_handle) { | 193 void AudioRendererImpl::DoPause() { |
250 DCHECK_EQ(kLowLatency, latency_type_); | 194 DCHECK(audio_device_.get()); |
251 #if defined(OS_WIN) | 195 audio_device_->Pause(false); |
252 DCHECK(socket_handle); | |
253 #else | |
254 DCHECK_GE(socket_handle, 0); | |
255 #endif | |
256 socket_.reset(new base::SyncSocket(socket_handle)); | |
257 } | 196 } |
258 | 197 |
259 void AudioRendererImpl::CreateAudioThread() { | 198 void AudioRendererImpl::DoSeek() { |
260 DCHECK_EQ(kLowLatency, latency_type_); | 199 earliest_end_time_ = base::Time::Now(); |
261 audio_thread_.reset( | |
262 new base::DelegateSimpleThread(this, "renderer_audio_thread")); | |
263 audio_thread_->Start(); | |
264 } | |
265 | 200 |
266 void AudioRendererImpl::OnLowLatencyCreated( | 201 // Pause and flush the stream when we seek to a new location. |
267 base::SharedMemoryHandle handle, | 202 DCHECK(audio_device_.get()); |
268 base::SyncSocket::Handle socket_handle, | 203 audio_device_->Pause(true); |
269 uint32 length) { | |
270 DCHECK(MessageLoop::current() == ChildProcess::current()->io_message_loop()); | |
271 DCHECK_EQ(kLowLatency, latency_type_); | |
272 #if defined(OS_WIN) | |
273 DCHECK(handle); | |
274 #else | |
275 DCHECK_GE(handle.fd, 0); | |
276 #endif | |
277 DCHECK_NE(0u, length); | |
278 | |
279 base::AutoLock auto_lock(lock_); | |
280 if (stopped_) | |
281 return; | |
282 | |
283 shared_memory_.reset(new base::SharedMemory(handle, false)); | |
284 shared_memory_->Map(media::TotalSharedMemorySizeInBytes(length)); | |
285 shared_memory_size_ = length; | |
286 | |
287 CreateSocket(socket_handle); | |
288 CreateAudioThread(); | |
289 } | |
290 | |
291 void AudioRendererImpl::OnRequestPacket(AudioBuffersState buffers_state) { | |
292 DCHECK(MessageLoop::current() == ChildProcess::current()->io_message_loop()); | |
293 DCHECK_EQ(kHighLatency, latency_type_); | |
294 { | |
295 base::AutoLock auto_lock(lock_); | |
296 DCHECK(!pending_request_); | |
297 pending_request_ = true; | |
298 request_buffers_state_ = buffers_state; | |
299 } | |
300 | |
301 // Try to fill in the fulfill the packet request. | |
302 NotifyPacketReadyTask(); | |
303 } | |
304 | |
305 void AudioRendererImpl::OnStateChanged(AudioStreamState state) { | |
306 DCHECK(MessageLoop::current() == ChildProcess::current()->io_message_loop()); | |
307 | |
308 base::AutoLock auto_lock(lock_); | |
309 if (stopped_) | |
310 return; | |
311 | |
312 switch (state) { | |
313 case kAudioStreamError: | |
314 // We receive this error if we counter an hardware error on the browser | |
315 // side. We can proceed with ignoring the audio stream. | |
316 // TODO(hclam): We need more handling of these kind of error. For example | |
317 // re-try creating the audio output stream on the browser side or fail | |
318 // nicely and report to demuxer that the whole audio stream is discarded. | |
319 host()->DisableAudioRenderer(); | |
320 break; | |
321 // TODO(hclam): handle these events. | |
322 case kAudioStreamPlaying: | |
323 case kAudioStreamPaused: | |
324 break; | |
325 default: | |
326 NOTREACHED(); | |
327 break; | |
328 } | |
329 } | |
330 | |
331 void AudioRendererImpl::OnVolume(double volume) { | |
332 // TODO(hclam): decide whether we need to report the current volume to | |
333 // pipeline. | |
334 } | |
335 | |
336 void AudioRendererImpl::CreateStreamTask(const AudioParameters& audio_params) { | |
337 DCHECK(MessageLoop::current() == ChildProcess::current()->io_message_loop()); | |
338 | |
339 base::AutoLock auto_lock(lock_); | |
340 if (stopped_) | |
341 return; | |
342 | |
343 // Make sure we don't call create more than once. | |
344 DCHECK_EQ(0, stream_id_); | |
345 stream_id_ = filter_->AddDelegate(this); | |
346 ChildProcess::current()->io_message_loop()->AddDestructionObserver(this); | |
347 | |
348 AudioParameters params_to_send(audio_params); | |
349 // Let the browser choose packet size. | |
350 params_to_send.samples_per_packet = 0; | |
351 | |
352 Send(new AudioHostMsg_CreateStream(stream_id_, | |
353 params_to_send, | |
354 latency_type_ == kLowLatency)); | |
355 } | |
356 | |
357 void AudioRendererImpl::PlayTask() { | |
358 DCHECK(MessageLoop::current() == ChildProcess::current()->io_message_loop()); | |
359 | |
360 earliest_end_time_ = base::Time::Now(); | |
361 Send(new AudioHostMsg_PlayStream(stream_id_)); | |
362 } | |
363 | |
364 void AudioRendererImpl::PauseTask() { | |
365 DCHECK(MessageLoop::current() == ChildProcess::current()->io_message_loop()); | |
366 | |
367 Send(new AudioHostMsg_PauseStream(stream_id_)); | |
368 } | |
369 | |
370 void AudioRendererImpl::SeekTask() { | |
371 DCHECK(MessageLoop::current() == ChildProcess::current()->io_message_loop()); | |
372 | |
373 earliest_end_time_ = base::Time::Now(); | |
374 // We have to pause the audio stream before we can flush. | |
375 Send(new AudioHostMsg_PauseStream(stream_id_)); | |
376 Send(new AudioHostMsg_FlushStream(stream_id_)); | |
377 } | |
378 | |
379 void AudioRendererImpl::DestroyTask() { | |
380 DCHECK(MessageLoop::current() == ChildProcess::current()->io_message_loop()); | |
381 | |
382 // Make sure we don't call destroy more than once. | |
383 DCHECK_NE(0, stream_id_); | |
384 filter_->RemoveDelegate(stream_id_); | |
385 Send(new AudioHostMsg_CloseStream(stream_id_)); | |
386 // During shutdown this may be NULL; don't worry about deregistering in that | |
387 // case. | |
388 if (ChildProcess::current()) | |
389 ChildProcess::current()->io_message_loop()->RemoveDestructionObserver(this); | |
390 stream_id_ = 0; | |
391 } | |
392 | |
393 void AudioRendererImpl::SetVolumeTask(double volume) { | |
394 DCHECK(MessageLoop::current() == ChildProcess::current()->io_message_loop()); | |
395 | |
396 base::AutoLock auto_lock(lock_); | |
397 if (stopped_) | |
398 return; | |
399 Send(new AudioHostMsg_SetVolume(stream_id_, volume)); | |
400 } | |
401 | |
402 void AudioRendererImpl::NotifyPacketReadyTask() { | |
403 DCHECK(MessageLoop::current() == ChildProcess::current()->io_message_loop()); | |
404 DCHECK_EQ(kHighLatency, latency_type_); | |
405 | |
406 base::AutoLock auto_lock(lock_); | |
407 if (stopped_) | |
408 return; | |
409 if (pending_request_ && GetPlaybackRate() > 0.0f) { | |
410 DCHECK(shared_memory_.get()); | |
411 | |
412 // Adjust the playback delay. | |
413 base::Time current_time = base::Time::Now(); | |
414 | |
415 base::TimeDelta request_delay = | |
416 ConvertToDuration(request_buffers_state_.total_bytes()); | |
417 | |
418 // Add message delivery delay. | |
419 if (current_time > request_buffers_state_.timestamp) { | |
420 base::TimeDelta receive_latency = | |
421 current_time - request_buffers_state_.timestamp; | |
422 | |
423 // If the receive latency is too much it may offset all the delay. | |
424 if (receive_latency >= request_delay) { | |
425 request_delay = base::TimeDelta(); | |
426 } else { | |
427 request_delay -= receive_latency; | |
428 } | |
429 } | |
430 | |
431 // Finally we need to adjust the delay according to playback rate. | |
432 if (GetPlaybackRate() != 1.0f) { | |
433 request_delay = base::TimeDelta::FromMicroseconds( | |
434 static_cast<int64>(ceil(request_delay.InMicroseconds() * | |
435 GetPlaybackRate()))); | |
436 } | |
437 | |
438 bool buffer_empty = (request_buffers_state_.pending_bytes == 0) && | |
439 (current_time >= earliest_end_time_); | |
440 | |
441 // For high latency mode we don't write length into shared memory, | |
442 // it is explicit part of AudioHostMsg_NotifyPacketReady() message, | |
443 // so no need to reserve first word of buffer for length. | |
444 uint32 filled = FillBuffer(static_cast<uint8*>(shared_memory_->memory()), | |
445 shared_memory_size_, request_delay, | |
446 buffer_empty); | |
447 UpdateEarliestEndTime(filled, request_delay, current_time); | |
448 pending_request_ = false; | |
449 | |
450 // Then tell browser process we are done filling into the buffer. | |
451 Send(new AudioHostMsg_NotifyPacketReady(stream_id_, filled)); | |
452 } | |
453 } | 204 } |
454 | 205 |
455 void AudioRendererImpl::WillDestroyCurrentMessageLoop() { | 206 void AudioRendererImpl::WillDestroyCurrentMessageLoop() { |
456 DCHECK(!ChildProcess::current() || // During shutdown. | 207 DCHECK(!ChildProcess::current() || // During shutdown. |
457 (MessageLoop::current() == | 208 (MessageLoop::current() == |
458 ChildProcess::current()->io_message_loop())); | 209 ChildProcess::current()->io_message_loop())); |
459 | 210 |
460 // We treat the IO loop going away the same as stopping. | 211 // We treat the IO loop going away the same as stopping. |
461 base::AutoLock auto_lock(lock_); | |
462 if (stopped_) | 212 if (stopped_) |
463 return; | 213 return; |
464 | 214 |
465 stopped_ = true; | 215 stopped_ = true; |
466 DestroyTask(); | |
467 } | |
468 | 216 |
469 // Our audio thread runs here. We receive requests for more data and send it | 217 // During shutdown this may be NULL; don't worry about deregistering in that |
470 // on this thread. | 218 // case. |
471 void AudioRendererImpl::Run() { | 219 if (ChildProcess::current()) |
472 DCHECK_EQ(kLowLatency, latency_type_); | 220 ChildProcess::current()->io_message_loop()->RemoveDestructionObserver(this); |
473 audio_thread_->SetThreadPriority(base::kThreadPriority_RealtimeAudio); | |
474 | 221 |
475 int bytes; | 222 if (audio_device_.get()) { |
476 while (sizeof(bytes) == socket_->Receive(&bytes, sizeof(bytes))) { | 223 audio_device_->Stop(); |
477 if (bytes == media::AudioOutputController::kPauseMark) { | 224 audio_device_ = NULL; |
478 // When restarting playback, host should get new data, | |
479 // not what is currently in the buffer. | |
480 media::SetActualDataSizeInBytes(shared_memory_.get(), | |
481 shared_memory_size_, | |
482 0); | |
483 continue; | |
484 } | |
485 else if (bytes < 0) | |
486 break; | |
487 base::AutoLock auto_lock(lock_); | |
488 if (stopped_) | |
489 break; | |
490 float playback_rate = GetPlaybackRate(); | |
491 if (playback_rate <= 0.0f) | |
492 continue; | |
493 DCHECK(shared_memory_.get()); | |
494 base::TimeDelta request_delay = ConvertToDuration(bytes); | |
495 | |
496 // We need to adjust the delay according to playback rate. | |
497 if (playback_rate != 1.0f) { | |
498 request_delay = base::TimeDelta::FromMicroseconds( | |
499 static_cast<int64>(ceil(request_delay.InMicroseconds() * | |
500 playback_rate))); | |
501 } | |
502 base::Time time_now = base::Time::Now(); | |
503 uint32 size = FillBuffer(static_cast<uint8*>(shared_memory_->memory()), | |
504 shared_memory_size_, | |
505 request_delay, | |
506 time_now >= earliest_end_time_); | |
507 media::SetActualDataSizeInBytes(shared_memory_.get(), | |
508 shared_memory_size_, | |
509 size); | |
510 UpdateEarliestEndTime(size, request_delay, time_now); | |
511 } | 225 } |
512 } | 226 } |
513 | 227 |
514 void AudioRendererImpl::Send(IPC::Message* message) { | 228 void AudioRendererImpl::Render(const std::vector<float*>& audio_data, |
515 filter_->Send(message); | 229 size_t number_of_frames, |
| 230 size_t audio_delay_milliseconds) { |
| 231 if (stopped_ || GetPlaybackRate() == 0.0f) { |
| 232 // Output silence if stopped or if playback is paused (rate == 0). |
| 233 for (size_t i = 0; i < audio_data.size(); ++i) |
| 234 memset(audio_data[i], 0, sizeof(float) * number_of_frames); |
| 235 return; |
| 236 } |
| 237 |
| 238 // Adjust the playback delay. |
| 239 base::Time current_time = base::Time::Now(); |
| 240 |
| 241 base::TimeDelta request_delay = |
| 242 base::TimeDelta::FromMilliseconds(audio_delay_milliseconds); |
| 243 |
| 244 // Finally we need to adjust the delay according to playback rate. |
| 245 if (GetPlaybackRate() != 1.0f) { |
| 246 request_delay = base::TimeDelta::FromMicroseconds( |
| 247 static_cast<int64>(ceil(request_delay.InMicroseconds() * |
| 248 GetPlaybackRate()))); |
| 249 } |
| 250 |
| 251 uint32 bytes_per_frame = |
| 252 audio_parameters_.bits_per_sample * audio_parameters_.channels / 8; |
| 253 |
| 254 const size_t buf_size = number_of_frames * bytes_per_frame; |
| 255 scoped_array<uint8> buf(new uint8[buf_size]); |
| 256 |
| 257 base::Time time_now = base::Time::Now(); |
| 258 uint32 filled = FillBuffer(buf.get(), |
| 259 buf_size, |
| 260 request_delay, |
| 261 time_now >= earliest_end_time_); |
| 262 DCHECK_LE(filled, buf_size); |
| 263 |
| 264 uint32 filled_frames = filled / bytes_per_frame; |
| 265 |
| 266 // Deinterleave each audio channel. |
| 267 int channels = audio_data.size(); |
| 268 for (int channel_index = 0; channel_index < channels; ++channel_index) { |
| 269 media::DeinterleaveAudioChannel(buf.get(), |
| 270 audio_data[channel_index], |
| 271 channels, |
| 272 channel_index, |
| 273 bytes_per_frame / channels, |
| 274 filled_frames); |
| 275 |
| 276 // If FillBuffer() didn't give us enough data then zero out the remainder. |
| 277 if (filled_frames < number_of_frames) { |
| 278 int frames_to_zero = number_of_frames - filled_frames; |
| 279 memset(audio_data[channel_index] + filled_frames, 0, sizeof(float) * frames_to_zero); |
| 280 } |
| 281 } |
516 } | 282 } |