Chromium Code Reviews

Unified Diff: media/audio/win/audio_low_latency_output_win.cc

Issue 1276523004: Fix WASAPI restriction to be based on period size; fixes Win10. (Closed) Base URL: http://chromium.googlesource.com/chromium/src.git@master
Patch Set: Created 5 years, 4 months ago
 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "media/audio/win/audio_low_latency_output_win.h"

 #include <Functiondiscoverykeys_devpkey.h>

 #include "base/command_line.h"
 #include "base/logging.h"
(...skipping 154 matching lines...)
   if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) {
     // Initialize the audio stream between the client and the device in shared
     // mode and using event-driven buffer handling.
     hr = CoreAudioUtil::SharedModeInitialize(
         audio_client.get(), &format_, audio_samples_render_event_.Get(),
         &endpoint_buffer_size_frames_,
         communications_device ? &kCommunicationsSessionId : NULL);
     if (FAILED(hr))
       return false;

-    // We know from experience that the best possible callback sequence is
-    // achieved when the packet size (given by the native device period)
-    // is an even divisor of the endpoint buffer size.
+    REFERENCE_TIME device_period = 0;
+    if (FAILED(CoreAudioUtil::GetDevicePeriod(
+            audio_client.get(), AUDCLNT_SHAREMODE_SHARED, &device_period))) {
+      return false;
+    }
+
+    const int preferred_frames_per_buffer = static_cast<int>(
+        format_.Format.nSamplesPerSec *
+            CoreAudioUtil::RefererenceTimeToTimeDelta(device_period)
+                .InSecondsF() +
+        0.5);
+
+    // Packet size should always be an even divisor of the device period for
+    // best performance; things will still work otherwise, but may glitch for a
+    // couple of reasons.
+    //
+    // The first reason is when repeated RenderAudioFromSource() calls hit the
+    // shared memory boundary between the renderer and the browser. The next
+    // audio buffer is always requested after the current request is consumed.
+    // With back-to-back calls the round trip may not be fast enough, and thus
+    // audio will glitch as we fail to deliver audio in a timely manner.
+    //
+    // The second reason is event wakeup efficiency. We may have too few or too
+    // many frames to fill the output buffer requested by WASAPI. If too few,
+    // we'll refuse the render event and wait until more output space is
+    // available. If we have too many frames, we'll only partially fill and
+    // wait for the next render event. In either case certain remainders may
+    // leave us unable to fulfill the request in a timely manner, and glitches
+    // follow.
+    //
+    // Log a warning in these cases so we can help users in the field.
     // Examples: 48kHz => 960 % 480, 44.1kHz => 896 % 448 or 882 % 441.
-    if (endpoint_buffer_size_frames_ % packet_size_frames_ != 0) {
-      LOG(ERROR)
-          << "Bailing out due to non-perfect timing. Buffer size of "
+    if (preferred_frames_per_buffer % packet_size_frames_) {
+      LOG(WARNING)
+          << "Using WASAPI output with a non-optimal buffer size; glitches from"
+          << " back-to-back shared memory reads and partial fills of WASAPI"
+          << " output buffers may occur. Buffer size of "
           << packet_size_frames_ << " is not an even divisor of "
-          << endpoint_buffer_size_frames_;
-      return false;
+          << preferred_frames_per_buffer;
     }
   } else {
     // TODO(henrika): break out to CoreAudioUtil::ExclusiveModeInitialize()
     // when removing the enable-exclusive-audio flag.
     hr = ExclusiveModeInitialization(audio_client.get(),
                                      audio_samples_render_event_.Get(),
                                      &endpoint_buffer_size_frames_);
     if (FAILED(hr))
       return false;

(...skipping 237 matching lines...)
     // a buffer from the system (triggers this event) the client must
     // process the entire buffer. Calls to the GetCurrentPadding method
     // are unnecessary because the packet size must always equal the
     // buffer size. In contrast to the shared mode buffering scheme,
     // the latency for an event-driven, exclusive-mode stream depends
     // directly on the buffer size.
     num_available_frames = endpoint_buffer_size_frames_;
   }

   // Check if there is enough available space to fit the packet size
-  // specified by the client.
+  // specified by the client; if not, wait until a future callback.
   if (num_available_frames < packet_size_frames_)
     return true;

-  DLOG_IF(ERROR, num_available_frames % packet_size_frames_ != 0)
-      << "Non-perfect timing detected (num_available_frames="
-      << num_available_frames << ", packet_size_frames="
-      << packet_size_frames_ << ")";
-
-  // Derive the number of packets we need to get from the client to
-  // fill up the available area in the endpoint buffer.
-  // |num_packets| will always be one for exclusive-mode streams and
-  // will be one in most cases for shared mode streams as well.
-  // However, we have found that two packets can sometimes be
-  // required.
-  size_t num_packets = (num_available_frames / packet_size_frames_);
-
-  for (size_t n = 0; n < num_packets; ++n) {
-    // Grab all available space in the rendering endpoint buffer
-    // into which the client can write a data packet.
-    hr = audio_render_client_->GetBuffer(packet_size_frames_,
-                                         &audio_data);
-    if (FAILED(hr)) {
-      DLOG(ERROR) << "Failed to use rendering audio buffer: "
-                  << std::hex << hr;
-      return false;
-    }
-
-    // Derive the audio delay which corresponds to the delay between
-    // a render event and the time when the first audio sample in a
-    // packet is played out through the speaker. This delay value
-    // can typically be utilized by an acoustic echo-control (AEC)
-    // unit at the render side.
-    UINT64 position = 0;
-    uint32 audio_delay_bytes = 0;
-    hr = audio_clock_->GetPosition(&position, NULL);
-    if (SUCCEEDED(hr)) {
-      // Stream position of the sample that is currently playing
-      // through the speaker.
-      double pos_sample_playing_frames = format_.Format.nSamplesPerSec *
-          (static_cast<double>(position) / device_frequency);
-
-      // Stream position of the last sample written to the endpoint
-      // buffer. Note that the packet we are about to receive in
-      // the upcoming callback is also included.
-      size_t pos_last_sample_written_frames =
-          num_written_frames_ + packet_size_frames_;
-
-      // Derive the actual delay value which will be fed to the
-      // render client using the OnMoreData() callback.
-      audio_delay_bytes = (pos_last_sample_written_frames -
-          pos_sample_playing_frames) * format_.Format.nBlockAlign;
-    }
-
-    // Read a data packet from the registered client source and
-    // deliver a delay estimate in the same callback to the client.
-    int frames_filled = source_->OnMoreData(
-        audio_bus_.get(), audio_delay_bytes);
-    uint32 num_filled_bytes = frames_filled * format_.Format.nBlockAlign;
-    DCHECK_LE(num_filled_bytes, packet_size_bytes_);
-
-    // Note: If this ever changes to output raw float the data must be
-    // clipped and sanitized since it may come from an untrusted
-    // source such as NaCl.
-    const int bytes_per_sample = format_.Format.wBitsPerSample >> 3;
-    audio_bus_->Scale(volume_);
-    audio_bus_->ToInterleaved(
-        frames_filled, bytes_per_sample, audio_data);
-
-    // Release the buffer space acquired in the GetBuffer() call.
-    // Render silence if we were not able to fill up the buffer totally.
-    DWORD flags = (num_filled_bytes < packet_size_bytes_) ?
-        AUDCLNT_BUFFERFLAGS_SILENT : 0;
-    audio_render_client_->ReleaseBuffer(packet_size_frames_, flags);
-
-    num_written_frames_ += packet_size_frames_;
-  }
-
+  // Grab all available space in the rendering endpoint buffer
+  // into which the client can write a data packet.
+  hr = audio_render_client_->GetBuffer(packet_size_frames_,
+                                       &audio_data);
+  if (FAILED(hr)) {
+    DLOG(ERROR) << "Failed to use rendering audio buffer: "
+                << std::hex << hr;
+    return false;
+  }
+
+  // Derive the audio delay which corresponds to the delay between
+  // a render event and the time when the first audio sample in a
+  // packet is played out through the speaker. This delay value
+  // can typically be utilized by an acoustic echo-control (AEC)
+  // unit at the render side.
+  UINT64 position = 0;
+  uint32 audio_delay_bytes = 0;
+  hr = audio_clock_->GetPosition(&position, NULL);
+  if (SUCCEEDED(hr)) {
+    // Stream position of the sample that is currently playing
+    // through the speaker.
+    double pos_sample_playing_frames = format_.Format.nSamplesPerSec *
+        (static_cast<double>(position) / device_frequency);
+
+    // Stream position of the last sample written to the endpoint
+    // buffer. Note that the packet we are about to receive in
+    // the upcoming callback is also included.
+    size_t pos_last_sample_written_frames =
+        num_written_frames_ + packet_size_frames_;
+
+    // Derive the actual delay value which will be fed to the
+    // render client using the OnMoreData() callback.
+    audio_delay_bytes = (pos_last_sample_written_frames -
+        pos_sample_playing_frames) * format_.Format.nBlockAlign;
+  }
+
+  // Read a data packet from the registered client source and
+  // deliver a delay estimate in the same callback to the client.
+  int frames_filled = source_->OnMoreData(
+      audio_bus_.get(), audio_delay_bytes);
+  uint32 num_filled_bytes = frames_filled * format_.Format.nBlockAlign;
+  DCHECK_LE(num_filled_bytes, packet_size_bytes_);
+
+  // Note: If this ever changes to output raw float the data must be
+  // clipped and sanitized since it may come from an untrusted
+  // source such as NaCl.
+  const int bytes_per_sample = format_.Format.wBitsPerSample >> 3;
+  audio_bus_->Scale(volume_);
+  audio_bus_->ToInterleaved(frames_filled, bytes_per_sample, audio_data);
+
+  // Release the buffer space acquired in the GetBuffer() call.
+  // Render silence if we were not able to fill up the buffer totally.
+  DWORD flags = (num_filled_bytes < packet_size_bytes_) ?
+      AUDCLNT_BUFFERFLAGS_SILENT : 0;
+  audio_render_client_->ReleaseBuffer(packet_size_frames_, flags);
+
+  num_written_frames_ += packet_size_frames_;
   return true;
 }

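The delay derivation in this hunk can also be exercised with stand-in numbers. The sketch below assumes a 48 kHz, 16-bit stereo format (nBlockAlign == 4) and a device clock whose frequency equals the sample rate, so IAudioClock position ticks map 1:1 to frames; every value is hypothetical, not taken from the patch.

```cpp
#include <cstdint>
#include <cstdio>

int main() {
  const double sample_rate = 48000.0;        // nSamplesPerSec (assumed)
  const int block_align = 4;                 // nBlockAlign for 16-bit stereo
  const uint64_t device_frequency = 48000;   // IAudioClock::GetFrequency() (assumed)
  const uint64_t position = 1920;            // IAudioClock::GetPosition() (assumed)
  const uint64_t num_written_frames = 2400;  // frames written so far (assumed)
  const uint64_t packet_size_frames = 480;   // packet about to be requested

  // Frame currently being played through the speaker.
  const double pos_sample_playing_frames =
      sample_rate * (static_cast<double>(position) / device_frequency);

  // Last frame written to the endpoint buffer, including the upcoming packet.
  const uint64_t pos_last_sample_written_frames =
      num_written_frames + packet_size_frames;

  // Delay handed to OnMoreData(): (2880 - 1920) * 4 = 3840 bytes, i.e. 960
  // frames or 20 ms at 48 kHz.
  const uint32_t audio_delay_bytes = static_cast<uint32_t>(
      (pos_last_sample_written_frames - pos_sample_playing_frames) *
      block_align);
  printf("audio_delay_bytes = %u\n", audio_delay_bytes);
  return 0;
}
```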
 HRESULT WASAPIAudioOutputStream::ExclusiveModeInitialization(
     IAudioClient* client, HANDLE event_handle, uint32* endpoint_buffer_size) {
   DCHECK_EQ(share_mode_, AUDCLNT_SHAREMODE_EXCLUSIVE);

   float f = (1000.0 * packet_size_frames_) / format_.Format.nSamplesPerSec;
   REFERENCE_TIME requested_buffer_duration =
       static_cast<REFERENCE_TIME>(f * 10000.0 + 0.5);
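For the exclusive-mode path, the frames-to-REFERENCE_TIME conversion above is worth spelling out: one REFERENCE_TIME tick is 100 ns, so one millisecond is 10000 ticks. A small sketch with an assumed 480-frame packet at 48 kHz (values are illustrative only):

```cpp
#include <cstdio>

int main() {
  const int packet_size_frames = 480;  // assumed packet size
  const double sample_rate = 48000.0;  // assumed nSamplesPerSec

  // Duration of the packet in milliseconds: (1000 * 480) / 48000 = 10 ms.
  const double f = (1000.0 * packet_size_frames) / sample_rate;

  // 1 ms = 10000 REFERENCE_TIME ticks (100 ns each); round to nearest tick.
  const long long requested_buffer_duration =
      static_cast<long long>(f * 10000.0 + 0.5);  // 100000

  printf("%d frames @ %.0f Hz -> %lld REFERENCE_TIME units\n",
         packet_size_frames, sample_rate, requested_buffer_duration);
  return 0;
}
```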
(...skipping 81 matching lines...)

     // Ensure that we don't quit the main thread loop immediately next
     // time Start() is called.
     ResetEvent(stop_render_event_.Get());
   }

   source_ = NULL;
 }

 }  // namespace media