OLD | NEW |
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "media/audio/win/audio_low_latency_output_win.h" | 5 #include "media/audio/win/audio_low_latency_output_win.h" |
6 | 6 |
7 #include <Functiondiscoverykeys_devpkey.h> | 7 #include <Functiondiscoverykeys_devpkey.h> |
8 | 8 |
9 #include "base/command_line.h" | 9 #include "base/command_line.h" |
10 #include "base/debug/trace_event.h" | 10 #include "base/debug/trace_event.h" |
(...skipping 53 matching lines...)
64 | 64 |
65 return static_cast<int>(format.Format.nSamplesPerSec); | 65 return static_cast<int>(format.Format.nSamplesPerSec); |
66 } | 66 } |
67 | 67 |
68 WASAPIAudioOutputStream::WASAPIAudioOutputStream(AudioManagerWin* manager, | 68 WASAPIAudioOutputStream::WASAPIAudioOutputStream(AudioManagerWin* manager, |
69 const std::string& device_id, | 69 const std::string& device_id, |
70 const AudioParameters& params, | 70 const AudioParameters& params, |
71 ERole device_role) | 71 ERole device_role) |
72 : creating_thread_id_(base::PlatformThread::CurrentId()), | 72 : creating_thread_id_(base::PlatformThread::CurrentId()), |
73 manager_(manager), | 73 manager_(manager), |
| 74 format_(), |
74 opened_(false), | 75 opened_(false), |
75 audio_parameters_are_valid_(false), | 76 audio_parameters_are_valid_(false), |
76 volume_(1.0), | 77 volume_(1.0), |
| 78 packet_size_frames_(0), |
| 79 packet_size_bytes_(0), |
77 endpoint_buffer_size_frames_(0), | 80 endpoint_buffer_size_frames_(0), |
78 device_id_(device_id), | 81 device_id_(device_id), |
79 device_role_(device_role), | 82 device_role_(device_role), |
80 share_mode_(GetShareMode()), | 83 share_mode_(GetShareMode()), |
81 num_written_frames_(0), | 84 num_written_frames_(0), |
82 source_(NULL), | 85 source_(NULL), |
83 audio_bus_(AudioBus::Create(params)) { | 86 audio_bus_(AudioBus::Create(params)) { |
84 DCHECK(manager_); | 87 DCHECK(manager_); |
85 VLOG(1) << "WASAPIAudioOutputStream::WASAPIAudioOutputStream()"; | 88 VLOG(1) << "WASAPIAudioOutputStream::WASAPIAudioOutputStream()"; |
86 VLOG_IF(1, share_mode_ == AUDCLNT_SHAREMODE_EXCLUSIVE) | 89 VLOG_IF(1, share_mode_ == AUDCLNT_SHAREMODE_EXCLUSIVE) |
(...skipping 36 matching lines...)
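The three new initializer-list entries close an initialization gap: format_ is a plain WAVEFORMATPCMEX struct, and the packet-size fields were previously indeterminate until Open() ran. A minimal sketch of why format_() matters (illustrative holder class, not the CL's code):

    // Value-initializing a POD member in the initializer list zero-fills
    // every field, so code that inspects format_ before Open() sees zeros
    // instead of garbage.
    struct Sketch {
      Sketch() : format_(), packet_size_frames_(0), packet_size_bytes_(0) {}
      WAVEFORMATPCMEX format_;      // format_() zeroes nSamplesPerSec, etc.
      size_t packet_size_frames_;
      size_t packet_size_bytes_;
    };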
123 | 126 |
124 // Add the parts which are unique to WAVE_FORMAT_EXTENSIBLE. | 127 // Add the parts which are unique to WAVE_FORMAT_EXTENSIBLE. |
125 format_.Samples.wValidBitsPerSample = params.bits_per_sample(); | 128 format_.Samples.wValidBitsPerSample = params.bits_per_sample(); |
126 format_.dwChannelMask = CoreAudioUtil::GetChannelConfig(device_id, eRender); | 129 format_.dwChannelMask = CoreAudioUtil::GetChannelConfig(device_id, eRender); |
127 format_.SubFormat = KSDATAFORMAT_SUBTYPE_PCM; | 130 format_.SubFormat = KSDATAFORMAT_SUBTYPE_PCM; |
128 | 131 |
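These three assignments are the WAVE_FORMAT_EXTENSIBLE-specific tail of the format that the elided lines above begin filling in. For orientation, a complete, illustrative setup for 16-bit stereo PCM at 48 kHz would look like this (the concrete values are examples, not taken from the CL):

    WAVEFORMATPCMEX format = {};
    format.Format.wFormatTag = WAVE_FORMAT_EXTENSIBLE;
    format.Format.nChannels = 2;
    format.Format.nSamplesPerSec = 48000;
    format.Format.wBitsPerSample = 16;
    format.Format.nBlockAlign =
        format.Format.nChannels * format.Format.wBitsPerSample / 8;
    format.Format.nAvgBytesPerSec =
        format.Format.nSamplesPerSec * format.Format.nBlockAlign;
    format.Format.cbSize = sizeof(WAVEFORMATEXTENSIBLE) - sizeof(WAVEFORMATEX);
    format.Samples.wValidBitsPerSample = 16;
    format.dwChannelMask = KSAUDIO_SPEAKER_STEREO;
    format.SubFormat = KSDATAFORMAT_SUBTYPE_PCM;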
129 // Store size (in different units) of audio packets which we expect to | 132 // Store size (in different units) of audio packets which we expect to |
130 // get from the audio endpoint device in each render event. | 133 // get from the audio endpoint device in each render event. |
131 packet_size_frames_ = params.frames_per_buffer(); | 134 packet_size_frames_ = params.frames_per_buffer(); |
132 packet_size_bytes_ = params.GetBytesPerBuffer(); | 135 packet_size_bytes_ = params.GetBytesPerBuffer(); |
133 packet_size_ms_ = (1000.0 * packet_size_frames_) / params.sample_rate(); | |
134 VLOG(1) << "Number of bytes per audio frame : " << format->nBlockAlign; | 136 VLOG(1) << "Number of bytes per audio frame : " << format->nBlockAlign; |
135 VLOG(1) << "Number of audio frames per packet: " << packet_size_frames_; | 137 VLOG(1) << "Number of audio frames per packet: " << packet_size_frames_; |
136 VLOG(1) << "Number of bytes per packet : " << packet_size_bytes_; | 138 VLOG(1) << "Number of bytes per packet : " << packet_size_bytes_; |
137 VLOG(1) << "Number of milliseconds per packet: " << packet_size_ms_; | 139 VLOG(1) << "Number of milliseconds per packet: " |
| 140 << params.GetBufferDuration().InMillisecondsF(); |
138 | 141 |
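Dropping the cached packet_size_ms_ member works because the same value can be recomputed on demand. Assuming AudioParameters::GetBufferDuration() has its usual Chromium definition (frames_per_buffer() over sample_rate(), expressed as a base::TimeDelta), the two expressions below are equivalent:

    // What the removed member used to cache:
    double packet_size_ms =
        (1000.0 * params.frames_per_buffer()) / params.sample_rate();
    // What the new logging line computes instead:
    double same_value_ms = params.GetBufferDuration().InMillisecondsF();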
139 // All events are auto-reset events and non-signaled initially. | 142 // All events are auto-reset events and non-signaled initially. |
140 | 143 |
141 // Create the event which the audio engine will signal each time | 144 // Create the event which the audio engine will signal each time |
142 // a buffer becomes ready to be processed by the client. | 145 // a buffer becomes ready to be processed by the client. |
143 audio_samples_render_event_.Set(CreateEvent(NULL, FALSE, FALSE, NULL)); | 146 audio_samples_render_event_.Set(CreateEvent(NULL, FALSE, FALSE, NULL)); |
144 DCHECK(audio_samples_render_event_.IsValid()); | 147 DCHECK(audio_samples_render_event_.IsValid()); |
145 | 148 |
146 // Create the event which will be set in Stop() when rendering shall stop. | 149 // Create the event which will be set in Stop() when rendering shall stop. |
147 stop_render_event_.Set(CreateEvent(NULL, FALSE, FALSE, NULL)); | 150 stop_render_event_.Set(CreateEvent(NULL, FALSE, FALSE, NULL)); |
(...skipping 79 matching lines...)
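Both handles are created with the same CreateEvent() call; spelled out, the arguments produce an unnamed, auto-reset event that starts non-signaled:

    HANDLE event = CreateEvent(NULL,    // default security attributes
                               FALSE,   // bManualReset: FALSE -> auto-reset
                               FALSE,   // bInitialState: non-signaled
                               NULL);   // no name
    // Auto-reset means a successful wait atomically resets the event, so one
    // render event wakes exactly one WaitForMultipleObjects() call.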
227 // a rendering endpoint buffer. | 230 // a rendering endpoint buffer. |
228 ScopedComPtr<IAudioRenderClient> audio_render_client = | 231 ScopedComPtr<IAudioRenderClient> audio_render_client = |
229 CoreAudioUtil::CreateRenderClient(audio_client); | 232 CoreAudioUtil::CreateRenderClient(audio_client); |
230 if (!audio_render_client) | 233 if (!audio_render_client) |
231 return false; | 234 return false; |
232 | 235 |
233 // Store valid COM interfaces. | 236 // Store valid COM interfaces. |
234 audio_client_ = audio_client; | 237 audio_client_ = audio_client; |
235 audio_render_client_ = audio_render_client; | 238 audio_render_client_ = audio_render_client; |
236 | 239 |
| 240 hr = audio_client_->GetService(__uuidof(IAudioClock), |
| 241 audio_clock_.ReceiveVoid()); |
| 242 if (FAILED(hr)) { |
| 243 LOG(ERROR) << "Failed to get IAudioClock service."; |
| 244 return false; |
| 245 } |
| 246 |
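Fetching IAudioClock once in Open() replaces the per-Run() lookup removed further down. For reference, ReceiveVoid() simply exposes the void** out-parameter that GetService() requires; the raw COM equivalent would be (error handling elided):

    IAudioClock* audio_clock = NULL;
    HRESULT hr = audio_client->GetService(
        __uuidof(IAudioClock), reinterpret_cast<void**>(&audio_clock));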
237 opened_ = true; | 247 opened_ = true; |
238 return true; | 248 return true; |
239 } | 249 } |
240 | 250 |
241 void WASAPIAudioOutputStream::Start(AudioSourceCallback* callback) { | 251 void WASAPIAudioOutputStream::Start(AudioSourceCallback* callback) { |
242 VLOG(1) << "WASAPIAudioOutputStream::Start()"; | 252 VLOG(1) << "WASAPIAudioOutputStream::Start()"; |
243 DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_); | 253 DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_); |
244 CHECK(callback); | 254 CHECK(callback); |
245 CHECK(opened_); | 255 CHECK(opened_); |
246 | 256 |
247 if (render_thread_) { | 257 if (render_thread_) { |
248 CHECK_EQ(callback, source_); | 258 CHECK_EQ(callback, source_); |
249 return; | 259 return; |
250 } | 260 } |
251 | 261 |
252 source_ = callback; | 262 source_ = callback; |
253 | 263 |
| 264 // Ensure that the endpoint buffer is prepared with silence. |
| 265 if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) { |
| 266 if (!CoreAudioUtil::FillRenderEndpointBufferWithSilence( |
| 267 audio_client_, audio_render_client_)) { |
| 268 LOG(ERROR) << "Failed to prepare endpoint buffers with silence."; |
| 269 callback->OnError(this); |
| 270 return; |
| 271 } |
| 272 } |
| 273 num_written_frames_ = endpoint_buffer_size_frames_; |
| 274 |
254 // Create and start the thread that will drive the rendering by waiting for | 275 // Create and start the thread that will drive the rendering by waiting for |
255 // render events. | 276 // render events. |
256 render_thread_.reset( | 277 render_thread_.reset( |
257 new base::DelegateSimpleThread(this, "wasapi_render_thread")); | 278 new base::DelegateSimpleThread(this, "wasapi_render_thread")); |
258 render_thread_->Start(); | 279 render_thread_->Start(); |
259 if (!render_thread_->HasBeenStarted()) { | 280 if (!render_thread_->HasBeenStarted()) { |
260 LOG(ERROR) << "Failed to start WASAPI render thread."; | 281 LOG(ERROR) << "Failed to start WASAPI render thread."; |
261 StopThread(); | 282 StopThread(); |
262 callback->OnError(this); | 283 callback->OnError(this); |
263 return; | 284 return; |
264 } | 285 } |
265 | 286 |
266 // Ensure that the endpoint buffer is prepared with silence. | |
267 if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) { | |
268 if (!CoreAudioUtil::FillRenderEndpointBufferWithSilence( | |
269 audio_client_, audio_render_client_)) { | |
270 LOG(ERROR) << "Failed to prepare endpoint buffers with silence."; | |
271 StopThread(); | |
272 callback->OnError(this); | |
273 return; | |
274 } | |
275 } | |
276 num_written_frames_ = endpoint_buffer_size_frames_; | |
277 | |
278 // Start streaming data between the endpoint buffer and the audio engine. | 287 // Start streaming data between the endpoint buffer and the audio engine. |
279 HRESULT hr = audio_client_->Start(); | 288 HRESULT hr = audio_client_->Start(); |
280 if (FAILED(hr)) { | 289 if (FAILED(hr)) { |
281 LOG_GETLASTERROR(ERROR) | 290 LOG_GETLASTERROR(ERROR) |
282 << "Failed to start output streaming: " << std::hex << hr; | 291 << "Failed to start output streaming: " << std::hex << hr; |
283 StopThread(); | 292 StopThread(); |
284 callback->OnError(this); | 293 callback->OnError(this); |
285 } | 294 } |
286 } | 295 } |
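Hoisting the silence pre-fill above thread creation fixes an ordering hazard: with the old layout, the render thread could begin servicing render events before the endpoint buffer was primed and before num_written_frames_ was set. Condensed, the new Start() sequence is (a sketch of the code above, bodies elided):

    source_ = callback;
    // 1. Prime the endpoint buffer with silence (shared mode only).
    // 2. num_written_frames_ = endpoint_buffer_size_frames_;
    // 3. Create and start the render thread.
    // 4. audio_client_->Start();  // begin streaming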
287 | 296 |
(...skipping 82 matching lines...)
370 } | 379 } |
371 | 380 |
372 HRESULT hr = S_FALSE; | 381 HRESULT hr = S_FALSE; |
373 | 382 |
374 bool playing = true; | 383 bool playing = true; |
375 bool error = false; | 384 bool error = false; |
376 HANDLE wait_array[] = { stop_render_event_, | 385 HANDLE wait_array[] = { stop_render_event_, |
377 audio_samples_render_event_ }; | 386 audio_samples_render_event_ }; |
378 UINT64 device_frequency = 0; | 387 UINT64 device_frequency = 0; |
379 | 388 |
380 // The IAudioClock interface enables us to monitor a stream's data | 389 // The device frequency is the frequency generated by the hardware clock in |
381 // rate and the current position in the stream. Allocate it before we | 390 // the audio device. The GetFrequency() method reports a constant frequency. |
382 // start spinning. | 391 hr = audio_clock_->GetFrequency(&device_frequency); |
383 ScopedComPtr<IAudioClock> audio_clock; | |
384 hr = audio_client_->GetService(__uuidof(IAudioClock), | |
385 audio_clock.ReceiveVoid()); | |
386 if (SUCCEEDED(hr)) { | |
387 // The device frequency is the frequency generated by the hardware clock in | |
388 // the audio device. The GetFrequency() method reports a constant frequency. | |
389 hr = audio_clock->GetFrequency(&device_frequency); | |
390 } | |
391 error = FAILED(hr); | 392 error = FAILED(hr); |
392 PLOG_IF(ERROR, error) << "Failed to acquire IAudioClock interface: " | 393 PLOG_IF(ERROR, error) << "Failed to read the device frequency: " |
393 << std::hex << hr; | 394 << std::hex << hr; |
394 | 395 |
395 // Keep rendering audio until the stop event or the stream-switch event | 396 // Keep rendering audio until the stop event or the stream-switch event |
396 // is signaled. An error event can also break the main thread loop. | 397 // is signaled. An error event can also break the main thread loop. |
397 while (playing && !error) { | 398 while (playing && !error) { |
398 // Wait for a close-down event, stream-switch event or a new render event. | 399 // Wait for a close-down event, stream-switch event or a new render event. |
399 DWORD wait_result = WaitForMultipleObjects(arraysize(wait_array), | 400 DWORD wait_result = WaitForMultipleObjects(arraysize(wait_array), |
400 wait_array, | 401 wait_array, |
401 FALSE, | 402 FALSE, |
402 INFINITE); | 403 INFINITE); |
403 | 404 |
404 switch (wait_result) { | 405 switch (wait_result) { |
405 case WAIT_OBJECT_0 + 0: | 406 case WAIT_OBJECT_0 + 0: |
406 // |stop_render_event_| has been set. | 407 // |stop_render_event_| has been set. |
407 playing = false; | 408 playing = false; |
408 break; | 409 break; |
409 case WAIT_OBJECT_0 + 1: | 410 case WAIT_OBJECT_0 + 1: |
410 // |audio_samples_render_event_| has been set. | 411 // |audio_samples_render_event_| has been set. |
411 error = !RenderAudioFromSource(audio_clock, device_frequency); | 412 error = !RenderAudioFromSource(device_frequency); |
412 break; | 413 break; |
413 default: | 414 default: |
414 error = true; | 415 error = true; |
415 break; | 416 break; |
416 } | 417 } |
417 } | 418 } |
418 | 419 |
419 if (playing && error) { | 420 if (playing && error) { |
420 // Stop audio rendering since something has gone wrong in our main thread | 421 // Stop audio rendering since something has gone wrong in our main thread |
421 // loop. Note that we are still in a "started" state, hence a Stop() call | 422 // loop. Note that we are still in a "started" state, hence a Stop() call |
422 // is required to join the thread properly. | 423 // is required to join the thread properly. |
423 audio_client_->Stop(); | 424 audio_client_->Stop(); |
424 PLOG(ERROR) << "WASAPI rendering failed."; | 425 PLOG(ERROR) << "WASAPI rendering failed."; |
425 } | 426 } |
426 | 427 |
427 // Disable MMCSS. | 428 // Disable MMCSS. |
428 if (mm_task && !avrt::AvRevertMmThreadCharacteristics(mm_task)) { | 429 if (mm_task && !avrt::AvRevertMmThreadCharacteristics(mm_task)) { |
429 PLOG(WARNING) << "Failed to disable MMCSS"; | 430 PLOG(WARNING) << "Failed to disable MMCSS"; |
430 } | 431 } |
431 } | 432 } |
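The mm_task handle reverted here comes from the MMCSS registration in the elided top half of Run(). Assuming the avrt wrapper mirrors the Win32 Avrt API, the pairing looks like:

    DWORD task_index = 0;
    HANDLE mm_task =
        avrt::AvSetMmThreadCharacteristics(L"Pro Audio", &task_index);
    // ... event-driven render loop runs with glitch-resistant scheduling ...
    if (mm_task && !avrt::AvRevertMmThreadCharacteristics(mm_task))
      PLOG(WARNING) << "Failed to disable MMCSS";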
432 | 433 |
433 bool WASAPIAudioOutputStream::RenderAudioFromSource( | 434 bool WASAPIAudioOutputStream::RenderAudioFromSource(UINT64 device_frequency) { |
434 IAudioClock* audio_clock, UINT64 device_frequency) { | |
435 TRACE_EVENT0("audio", "RenderAudioFromSource"); | 435 TRACE_EVENT0("audio", "RenderAudioFromSource"); |
436 | 436 |
437 HRESULT hr = S_FALSE; | 437 HRESULT hr = S_FALSE; |
438 UINT32 num_queued_frames = 0; | 438 UINT32 num_queued_frames = 0; |
439 uint8* audio_data = NULL; | 439 uint8* audio_data = NULL; |
440 | 440 |
441 // Contains how much new data we can write to the buffer without | 441 // Contains how much new data we can write to the buffer without |
442 // the risk of overwriting previously written data that the audio | 442 // the risk of overwriting previously written data that the audio |
443 // engine has not yet read from the buffer. | 443 // engine has not yet read from the buffer. |
444 size_t num_available_frames = 0; | 444 size_t num_available_frames = 0; |
(...skipping 51 matching lines...)
496 return false; | 496 return false; |
497 } | 497 } |
498 | 498 |
499 // Derive the audio delay which corresponds to the delay between | 499 // Derive the audio delay which corresponds to the delay between |
500 // a render event and the time when the first audio sample in a | 500 // a render event and the time when the first audio sample in a |
501 // packet is played out through the speaker. This delay value | 501 // packet is played out through the speaker. This delay value |
502 // can typically be utilized by an acoustic echo-control (AEC) | 502 // can typically be utilized by an acoustic echo-control (AEC) |
503 // unit at the render side. | 503 // unit at the render side. |
504 UINT64 position = 0; | 504 UINT64 position = 0; |
505 int audio_delay_bytes = 0; | 505 int audio_delay_bytes = 0; |
506 hr = audio_clock->GetPosition(&position, NULL); | 506 hr = audio_clock_->GetPosition(&position, NULL); |
507 if (SUCCEEDED(hr)) { | 507 if (SUCCEEDED(hr)) { |
508 // Stream position of the sample that is currently playing | 508 // Stream position of the sample that is currently playing |
509 // through the speaker. | 509 // through the speaker. |
510 double pos_sample_playing_frames = format_.Format.nSamplesPerSec * | 510 double pos_sample_playing_frames = format_.Format.nSamplesPerSec * |
511 (static_cast<double>(position) / device_frequency); | 511 (static_cast<double>(position) / device_frequency); |
512 | 512 |
513 // Stream position of the last sample written to the endpoint | 513 // Stream position of the last sample written to the endpoint |
514 // buffer. Note that the packet we are about to receive in | 514 // buffer. Note that the packet we are about to receive in |
515 // the upcoming callback is also included. | 515 // the upcoming callback is also included. |
516 size_t pos_last_sample_written_frames = | 516 size_t pos_last_sample_written_frames = |
(...skipping 129 matching lines...)
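The rest of the derivation is skipped above; based on the visible variables, the delay reported to the source is the gap between the last written frame and the frame currently playing, converted to bytes. A hedged reconstruction of the elided statements (the actual code may differ in detail):

    double pos_sample_playing_frames =
        format_.Format.nSamplesPerSec *
        (static_cast<double>(position) / device_frequency);
    // Frames written so far, including the packet filled in this callback
    // (num_written_frames_ is assumed to track the write position).
    size_t pos_last_sample_written_frames =
        num_written_frames_ + packet_size_frames_;
    int audio_delay_bytes = static_cast<int>(
        (pos_last_sample_written_frames - pos_sample_playing_frames) *
        format_.Format.nBlockAlign);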
646 | 646 |
647 // Ensure that we don't quit the main thread loop immediately next | 647 // Ensure that we don't quit the main thread loop immediately next |
648 // time Start() is called. | 648 // time Start() is called. |
649 ResetEvent(stop_render_event_.Get()); | 649 ResetEvent(stop_render_event_.Get()); |
650 } | 650 } |
651 | 651 |
652 source_ = NULL; | 652 source_ = NULL; |
653 } | 653 } |
654 | 654 |
655 } // namespace media | 655 } // namespace media |