Chromium Code Reviews

Unified diff: media/audio/win/audio_low_latency_output_win.cc

Issue 89663004: Don't start WASAPI render thread until setup completes. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Add IAudioClock.
 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "media/audio/win/audio_low_latency_output_win.h"

 #include <Functiondiscoverykeys_devpkey.h>

 #include "base/command_line.h"
 #include "base/debug/trace_event.h"
(...skipping 216 matching lines...)
   // a rendering endpoint buffer.
   ScopedComPtr<IAudioRenderClient> audio_render_client =
       CoreAudioUtil::CreateRenderClient(audio_client);
   if (!audio_render_client)
     return false;

   // Store valid COM interfaces.
   audio_client_ = audio_client;
   audio_render_client_ = audio_render_client;

+  hr = audio_client_->GetService(__uuidof(IAudioClock),
+                                 audio_clock_.ReceiveVoid());
+  if (FAILED(hr)) {
+    LOG(ERROR) << "Failed to get IAudioClock service.";
+    return false;
+  }
+
   opened_ = true;
   return true;
 }

 void WASAPIAudioOutputStream::Start(AudioSourceCallback* callback) {
   VLOG(1) << "WASAPIAudioOutputStream::Start()";
   DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_);
   CHECK(callback);
   CHECK(opened_);

   if (render_thread_) {
     CHECK_EQ(callback, source_);
     return;
   }

   source_ = callback;

+  // Ensure that the endpoint buffer is prepared with silence.
+  if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) {
+    if (!CoreAudioUtil::FillRenderEndpointBufferWithSilence(
+            audio_client_, audio_render_client_)) {
+      LOG(ERROR) << "Failed to prepare endpoint buffers with silence.";
+      callback->OnError(this);
+      return;
+    }
+  }
+  num_written_frames_ = endpoint_buffer_size_frames_;
+
   // Create and start the thread that will drive the rendering by waiting for
   // render events.
   render_thread_.reset(
       new base::DelegateSimpleThread(this, "wasapi_render_thread"));
   render_thread_->Start();
   if (!render_thread_->HasBeenStarted()) {
     LOG(ERROR) << "Failed to start WASAPI render thread.";
     StopThread();
     callback->OnError(this);
     return;
   }

-  // Ensure that the endpoint buffer is prepared with silence.
-  if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) {
-    if (!CoreAudioUtil::FillRenderEndpointBufferWithSilence(
-            audio_client_, audio_render_client_)) {
-      LOG(ERROR) << "Failed to prepare endpoint buffers with silence.";
-      StopThread();
-      callback->OnError(this);
-      return;
-    }
-  }
-  num_written_frames_ = endpoint_buffer_size_frames_;
-
   // Start streaming data between the endpoint buffer and the audio engine.
   HRESULT hr = audio_client_->Start();
   if (FAILED(hr)) {
     LOG_GETLASTERROR(ERROR)
         << "Failed to start output streaming: " << std::hex << hr;
     StopThread();
     callback->OnError(this);
   }
 }

(...skipping 82 matching lines...)
   }

   HRESULT hr = S_FALSE;

   bool playing = true;
   bool error = false;
   HANDLE wait_array[] = { stop_render_event_,
                           audio_samples_render_event_ };
   UINT64 device_frequency = 0;

-  // The IAudioClock interface enables us to monitor a stream's data
-  // rate and the current position in the stream. Allocate it before we
-  // start spinning.
-  ScopedComPtr<IAudioClock> audio_clock;
-  hr = audio_client_->GetService(__uuidof(IAudioClock),
-                                 audio_clock.ReceiveVoid());
-  if (SUCCEEDED(hr)) {
-    // The device frequency is the frequency generated by the hardware clock in
-    // the audio device. The GetFrequency() method reports a constant frequency.
-    hr = audio_clock->GetFrequency(&device_frequency);
-  }
+  // The device frequency is the frequency generated by the hardware clock in
+  // the audio device. The GetFrequency() method reports a constant frequency.
+  hr = audio_clock_->GetFrequency(&device_frequency);

DaleCurtis 2013/11/27 02:45:08: I left this here, but if it's truly constant we co
henrika (OOO until Aug 14) 2013/11/27 08:24:39: Should be OK to use in Open. Can't see any cons by
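Illustration only, not part of this patch set: a rough sketch of the follow-up henrika suggests above. Since GetFrequency() reports a constant value, Open() could query it once right after acquiring IAudioClock and cache it in a hypothetical |device_frequency_| member, so the render loop would not need to call GetFrequency() at all.

  // Sketch: cache the constant device clock frequency during Open().
  // Assumes a new |device_frequency_| member that is not in this patch.
  hr = audio_client_->GetService(__uuidof(IAudioClock),
                                 audio_clock_.ReceiveVoid());
  if (FAILED(hr)) {
    LOG(ERROR) << "Failed to get IAudioClock service.";
    return false;
  }
  hr = audio_clock_->GetFrequency(&device_frequency_);
  if (FAILED(hr)) {
    LOG(ERROR) << "Failed to read the audio clock frequency.";
    return false;
  }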
   error = FAILED(hr);
   PLOG_IF(ERROR, error) << "Failed to acquire IAudioClock interface: "
                         << std::hex << hr;

   // Keep rendering audio until the stop event or the stream-switch event
   // is signaled. An error event can also break the main thread loop.
   while (playing && !error) {
     // Wait for a close-down event, stream-switch event or a new render event.
     DWORD wait_result = WaitForMultipleObjects(arraysize(wait_array),
                                                wait_array,
                                                FALSE,
                                                INFINITE);

     switch (wait_result) {
       case WAIT_OBJECT_0 + 0:
         // |stop_render_event_| has been set.
         playing = false;
         break;
       case WAIT_OBJECT_0 + 1:
         // |audio_samples_render_event_| has been set.
-        error = !RenderAudioFromSource(audio_clock, device_frequency);
+        error = !RenderAudioFromSource(device_frequency);
         break;
       default:
         error = true;
         break;
     }
   }

   if (playing && error) {
     // Stop audio rendering since something has gone wrong in our main thread
     // loop. Note that, we are still in a "started" state, hence a Stop() call
     // is required to join the thread properly.
     audio_client_->Stop();
     PLOG(ERROR) << "WASAPI rendering failed.";
   }

   // Disable MMCSS.
   if (mm_task && !avrt::AvRevertMmThreadCharacteristics(mm_task)) {
     PLOG(WARNING) << "Failed to disable MMCSS";
   }
 }

-bool WASAPIAudioOutputStream::RenderAudioFromSource(
-    IAudioClock* audio_clock, UINT64 device_frequency) {
+bool WASAPIAudioOutputStream::RenderAudioFromSource(UINT64 device_frequency) {
   TRACE_EVENT0("audio", "RenderAudioFromSource");

   HRESULT hr = S_FALSE;
   UINT32 num_queued_frames = 0;
   uint8* audio_data = NULL;

   // Contains how much new data we can write to the buffer without
   // the risk of overwriting previously written data that the audio
   // engine has not yet read from the buffer.
   size_t num_available_frames = 0;
(...skipping 51 matching lines...)
       return false;
     }

     // Derive the audio delay which corresponds to the delay between
     // a render event and the time when the first audio sample in a
     // packet is played out through the speaker. This delay value
     // can typically be utilized by an acoustic echo-control (AEC)
     // unit at the render side.
     UINT64 position = 0;
     int audio_delay_bytes = 0;
-    hr = audio_clock->GetPosition(&position, NULL);
+    hr = audio_clock_->GetPosition(&position, NULL);
     if (SUCCEEDED(hr)) {
       // Stream position of the sample that is currently playing
       // through the speaker.
       double pos_sample_playing_frames = format_.Format.nSamplesPerSec *
           (static_cast<double>(position) / device_frequency);

       // Stream position of the last sample written to the endpoint
       // buffer. Note that, the packet we are about to receive in
       // the upcoming callback is also included.
       size_t pos_last_sample_written_frames =
(...skipping 129 matching lines...)

     // Ensure that we don't quit the main thread loop immediately next
     // time Start() is called.
     ResetEvent(stop_render_event_.Get());
   }

   source_ = NULL;
 }

 } // namespace media