Chromium Code Reviews

| Index: media/audio/win/audio_low_latency_output_win.cc |
| =================================================================== |
| --- media/audio/win/audio_low_latency_output_win.cc (revision 0) |
| +++ media/audio/win/audio_low_latency_output_win.cc (revision 0) |
| @@ -0,0 +1,571 @@ |
| +// Copyright (c) 2011 The Chromium Authors. All rights reserved. |
| +// Use of this source code is governed by a BSD-style license that can be |
| +// found in the LICENSE file. |
| + |
| +#include "media/audio/win/audio_low_latency_output_win.h" |
| + |
| +#include "base/logging.h" |
| +#include "base/memory/scoped_ptr.h" |
| +#include "base/utf_string_conversions.h" |
| +#include "media/audio/audio_util.h" |
| +#include "media/audio/win/audio_manager_win.h" |
| +#include "media/audio/win/avrt_wrapper_win.h" |
| + |
| +using base::win::ScopedComPtr; |
| +using base::win::ScopedCOMInitializer; |
| + |
| +WASAPIAudioOutputStream::WASAPIAudioOutputStream(AudioManagerWin* manager, |
| + const AudioParameters& params, |
| + ERole device_role) |
| + : com_init_(ScopedCOMInitializer::kMTA), |
|
tommi (sloooow) - chröme
2011/11/07 11:47:03
do you want to add a check to the constructor (as
henrika (OOO until Aug 14)
2011/11/08 12:18:37
Done.
|
| + manager_(manager), |
| + render_thread_(NULL), |
| + opened_(false), |
| + started_(false), |
| + volume_(1.0), |
| + endpoint_buffer_size_frames_(0), |
| + device_role_(device_role), |
| + num_written_frames_(0), |
| + source_(NULL) { |
| + DCHECK(manager_); |
| + |
| + // Load the Avrt DLL if not already loaded. Required to support MMCSS. |
| + bool avrt_init = avrt::Initialize(); |
| + DCHECK(avrt_init) << "Failed to load the Avrt.dll"; |
|
tommi (sloooow) - chröme
2011/11/07 11:47:03
nit: "Failed to load avrt.dll"
henrika (OOO until Aug 14)
2011/11/08 12:18:37
Done.
|
| + |
| + // Set up the desired render format specified by the client. |
| + format_.nSamplesPerSec = params.sample_rate; |
| + format_.wFormatTag = WAVE_FORMAT_PCM; |
| + format_.wBitsPerSample = params.bits_per_sample; |
| + format_.nChannels = params.channels; |
| + format_.nBlockAlign = (format_.wBitsPerSample / 8) * format_.nChannels; |
| + format_.nAvgBytesPerSec = format_.nSamplesPerSec * format_.nBlockAlign; |
| + format_.cbSize = 0; |
| + |
| + // Size in bytes of each audio frame. |
| + frame_size_ = format_.nBlockAlign; |
| + |
| + // Store the size (in different units) of the audio packets which we |
| + // expect to deliver to the audio endpoint device in each render event. |
| + packet_size_frames_ = params.GetPacketSize() / format_.nBlockAlign; |
|
Raymond Toy (Google)
2011/11/03 22:49:56
Is it possible for GetPacketSize() to return somet
henrika (OOO until Aug 14)
2011/11/04 11:26:15
The result will always be equal to params.samples_
Raymond Toy (Google)
2011/11/04 16:58:02
No, this is fine.
|
| + packet_size_bytes_ = params.GetPacketSize(); |
| + packet_size_ms_ = (1000.0 * packet_size_frames_) / params.sample_rate; |
| + DVLOG(1) << "Number of bytes per audio frame : " << frame_size_; |
| + DVLOG(1) << "Number of audio frames per packet: " << packet_size_frames_; |
| + DVLOG(1) << "Number of milliseconds per packet: " << packet_size_ms_; |
| + |
| + // All events are auto-reset events and non-signaled initially. |
| + |
| + // Create the event which the audio engine will signal each time |
| + // a buffer becomes ready to be processed by the client. |
| + audio_samples_render_event_.Set(CreateEvent(NULL, FALSE, FALSE, NULL)); |
| + DCHECK(audio_samples_render_event_.IsValid()); |
| + |
| + // Create the event which will be set in Stop() when rendering shall stop. |
| + stop_render_event_.Set(CreateEvent(NULL, FALSE, FALSE, NULL)); |
| + DCHECK(stop_render_event_.IsValid()); |
| +} |
| + |
| +WASAPIAudioOutputStream::~WASAPIAudioOutputStream() {} |
| + |
| +bool WASAPIAudioOutputStream::Open() { |
|
tommi (sloooow) - chröme
2011/11/07 11:47:03
ponder: Should we DCHECK in these methods (Open, S
henrika (OOO until Aug 14)
2011/11/08 12:18:37
Good point. Makes sense here since we need a certa
|
| + // Verify that we are not already opened. |
| + if (opened_) |
| + return false; |
| + |
| + // Obtain a reference to the IMMDevice interface of the default rendering |
| + // device with the specified role. |
| + HRESULT hr = SetRenderDevice(device_role_); |
| + if (FAILED(hr)) { |
| + HandleError(hr); |
| + return false; |
| + } |
| + |
| + // Obtain an IAudioClient interface which enables us to create and initialize |
| + // an audio stream between an audio application and the audio engine. |
| + hr = ActivateRenderDevice(); |
| + if (FAILED(hr)) { |
| + HandleError(hr); |
| + return false; |
| + } |
| + |
| + // Retrieve the stream format which the audio engine uses for its internal |
| + // processing/mixing of shared-mode streams. |
| + hr = GetAudioEngineStreamFormat(); |
| + if (FAILED(hr)) { |
| + HandleError(hr); |
| + return false; |
| + } |
| + |
| + // Verify that the selected audio endpoint supports the specified format |
| + // set during construction. |
| + if (!DesiredFormatIsSupported()) { |
| + hr = E_INVALIDARG; |
| + HandleError(hr); |
| + return false; |
| + } |
| + |
| + // Initialize the audio stream between the client and the device using |
| + // shared mode and the lowest possible glitch-free latency. |
| + hr = InitializeAudioEngine(); |
| + if (FAILED(hr)) { |
| + HandleError(hr); |
| + return false; |
| + } |
| + |
| + opened_ = true; |
| + |
| + return true; |
| +} |
| + |
| +void WASAPIAudioOutputStream::Start(AudioSourceCallback* callback) { |
| + DCHECK(callback); |
| + DCHECK(opened_); |
| + |
| + if (!opened_) |
| + return; |
| + |
| + if (started_) |
| + return; |
| + |
| + source_ = callback; |
| + |
| + // Create and start the thread that will drive the rendering by waiting for |
| + // render events. |
| + render_thread_ = new base::DelegateSimpleThread(this, "wasapi_render_thread"); |
| + render_thread_->Start(); |
|
Niklas Enbom
2011/11/03 15:32:09
Why start the thread already here? What if followi
henrika (OOO until Aug 14)
2011/11/04 11:26:15
Good point, will modify.
|
| + |
| + // Avoid start-up glitches by filling up the endpoint buffer with "silence" |
| + // before starting the stream. |
| + BYTE* data_ptr = NULL; |
| + HRESULT hr = audio_render_client_->GetBuffer(endpoint_buffer_size_frames_, |
| + &data_ptr); |
| + if (SUCCEEDED(hr)) { |
| + // Using the AUDCLNT_BUFFERFLAGS_SILENT flag eliminates the need to |
| + // explicitly write silence data to the rendering buffer. |
| + audio_render_client_->ReleaseBuffer(endpoint_buffer_size_frames_, |
| + AUDCLNT_BUFFERFLAGS_SILENT); |
| + num_written_frames_ = endpoint_buffer_size_frames_; |
| + |
| + // Sanity check: verify that the endpoint buffer is filled with silence. |
| + UINT32 num_queued_frames = 0; |
| + audio_client_->GetCurrentPadding(&num_queued_frames); |
| + DCHECK(num_queued_frames == num_written_frames_); |
| + } |
| + |
| + // Start streaming data between the endpoint buffer and the audio engine. |
| + hr = audio_client_->Start(); |
|
tommi (sloooow) - chröme
2011/11/07 11:47:03
If this fails, should we call HandleError()?
henrika (OOO until Aug 14)
2011/11/08 12:18:37
Improved:
if (FAILED(hr)) {
SetEvent(stop_re
|
| + DLOG_IF(ERROR, FAILED(hr)) << "Failed to start output streaming: " |
| + << std::hex << hr; |
| + |
| + started_ = SUCCEEDED(hr); |
| +} |
| + |
| +void WASAPIAudioOutputStream::Stop() { |
| + if (!started_) |
| + return; |
| + |
| + // Shut down the render thread. |
| + if (stop_render_event_.IsValid()) { |
| + SetEvent(stop_render_event_.Get()); |
| + } |
| + |
| + // Stop output audio streaming. |
| + HRESULT hr = audio_client_->Stop(); |
| + DLOG_IF(ERROR, FAILED(hr)) << "Failed to stop output streaming: " |
| + << std::hex << hr; |
| + |
| + // Wait until the thread completes and perform cleanup. |
| + if (render_thread_) { |
| + SetEvent(stop_render_event_.Get()); |
| + render_thread_->Join(); |
| + render_thread_ = NULL; |
| + } |
| + |
| + started_ = false; |
| +} |
| + |
| +void WASAPIAudioOutputStream::Close() { |
| + // It is valid to call Close() before calling Open() or Start(). |
| + // It is also valid to call Close() after Start() has been called. |
| + Stop(); |
| + |
| + // Inform the audio manager that we have been closed. This will cause our |
| + // destruction. |
| + manager_->ReleaseOutputStream(this); |
| +} |
| + |
| +void WASAPIAudioOutputStream::SetVolume(double volume) { |
| + if (volume < 0.0f || volume > 1.0f) |
| + return; |
|
tommi (sloooow) - chröme
2011/11/07 11:47:03
is this something we expect to happen? If this sh
henrika (OOO until Aug 14)
2011/11/08 12:18:37
Done.
|
| + volume_ = static_cast<float>(volume); |
|
tommi (sloooow) - chröme
2011/11/07 11:47:03
nit: casting from double to float isn't exact, so
henrika (OOO until Aug 14)
2011/11/08 12:18:37
Fixed ;-)
|
| +} |
| + |
| +void WASAPIAudioOutputStream::GetVolume(double* volume) { |
| + *volume = static_cast<double>(volume_); |
| +} |
| + |
| +// static |
| +double WASAPIAudioOutputStream::HardwareSampleRate(ERole device_role) { |
| + // It is assumed that this static method is called from a COM thread, i.e., |
| + // CoInitializeEx() is not called here again to avoid STA/MTA conflicts. |
| + ScopedComPtr<IMMDeviceEnumerator> enumerator; |
| + HRESULT hr = CoCreateInstance(__uuidof(MMDeviceEnumerator), |
| + NULL, |
| + CLSCTX_INPROC_SERVER, |
| + __uuidof(IMMDeviceEnumerator), |
| + enumerator.ReceiveVoid()); |
| + if (FAILED(hr)) { |
| + NOTREACHED() << "error code: " << std::hex << hr; |
| + } |
|
Raymond Toy (Google)
2011/11/03 22:49:56
Should we return 0.0 here, like we do in other NOT
henrika (OOO until Aug 14)
2011/11/04 11:26:15
Fixed.
|
| + |
| + ScopedComPtr<IMMDevice> endpoint_device; |
| + hr = enumerator->GetDefaultAudioEndpoint(eRender, |
| + device_role, |
| + endpoint_device.Receive()); |
| + if (FAILED(hr)) { |
| + // This will happen if there's no audio output device found or available |
| + // (e.g. some audio cards that have outputs will still report them as |
| + // "not found" when no speaker is plugged into the output jack). |
| + LOG(WARNING) << "No audio end point: " << std::hex << hr; |
| + return 0.0; |
| + } |
| + |
| + ScopedComPtr<IAudioClient> audio_client; |
| + hr = endpoint_device->Activate(__uuidof(IAudioClient), |
| + CLSCTX_INPROC_SERVER, |
| + NULL, |
| + audio_client.ReceiveVoid()); |
| + if (FAILED(hr)) { |
| + NOTREACHED() << "error code: " << std::hex << hr; |
| + return 0.0; |
| + } |
| + |
| + base::win::ScopedCoMem<WAVEFORMATEX> audio_engine_mix_format; |
| + hr = audio_client->GetMixFormat(&audio_engine_mix_format); |
| + if (FAILED(hr)) { |
| + NOTREACHED() << "error code: " << std::hex << hr; |
| + return 0.0; |
| + } |
| + |
| + return static_cast<double>(audio_engine_mix_format->nSamplesPerSec); |
| +} |
| + |
| +void WASAPIAudioOutputStream::Run() { |
| + ScopedCOMInitializer com_init(ScopedCOMInitializer::kMTA); |
| + |
| + // Increase the thread priority. |
| + render_thread_->SetThreadPriority(base::kThreadPriority_RealtimeAudio); |
| + |
| + // Enable MMCSS to ensure that this thread receives prioritized access to |
| + // CPU resources. |
| + DWORD task_index = 0; |
| + HANDLE mm_task = avrt::AvSetMmThreadCharacteristics(L"Pro Audio", |
| + &task_index); |
| + bool mmcss_is_ok = |
| + (mm_task && avrt::AvSetMmThreadPriority(mm_task, AVRT_PRIORITY_CRITICAL)); |
| + if (!mmcss_is_ok) { |
| + // Failed to enable MMCSS on this thread. It is not fatal but can lead |
| + // to reduced QoS at high load. |
| + DWORD err = GetLastError(); |
| + LOG(WARNING) << "Failed to enable MMCSS (error code=" << err << ")."; |
| + } |
| + |
| + HRESULT hr = S_FALSE; |
| + |
| + bool playing = true; |
| + bool error = false; |
| + HANDLE wait_array[2] = {stop_render_event_, audio_samples_render_event_}; |
| + UINT64 device_frequency = 0; |
| + |
| + // The IAudioClock interface enables us to monitor a stream's data |
| + // rate and the current position in the stream. Allocate it before we |
| + // start spinning. |
| + ScopedComPtr<IAudioClock> audio_clock; |
| + hr = audio_client_->GetService(__uuidof(IAudioClock), |
| + audio_clock.ReceiveVoid()); |
| + if (SUCCEEDED(hr)) { |
| + // The device frequency is the frequency generated by the hardware clock in |
| + // the audio device. The GetFrequency() method reports a constant frequency. |
| + hr = audio_clock->GetFrequency(&device_frequency); |
| + } |
| + error = FAILED(hr); |
|
Raymond Toy (Google)
2011/11/03 22:49:56
It's a little confusing here that hr can be the re
henrika (OOO until Aug 14)
2011/11/04 11:26:15
If GetService() fails, error is true and the threa
|
| + PLOG_IF(ERROR, error) << "Failed to acquire IAudioClock interface: " |
| + << std::hex << hr; |
| + |
| + while (playing && !error) { |
| + // Wait for a close-down event or a new render event. |
| + DWORD wait_result = WaitForMultipleObjects(2, wait_array, FALSE, INFINITE); |
| + |
| + switch (wait_result) { |
| + case WAIT_OBJECT_0 + 0: |
| + // |stop_render_event_| has been set. |
| + playing = false; |
| + break; |
| + case WAIT_OBJECT_0 + 1: |
| + { |
| + // |audio_samples_render_event_| has been set. |
| + UINT32 num_queued_frames = 0; |
| + uint8* audio_data = NULL; |
| + |
| + // Get the padding value which represents the amount of rendering |
| + // data that is queued up to play in the endpoint buffer. |
| + hr = audio_client_->GetCurrentPadding(&num_queued_frames); |
| + |
| + // Determine how much new data we can write to the buffer without |
| + // the risk of overwriting previously written data that the audio |
| + // engine has not yet read from the buffer. |
| + size_t num_available_frames = |
| + endpoint_buffer_size_frames_ - num_queued_frames; |
| + |
| + // Check if there is enough available space to fit the packet size |
| + // specified by the client. |
| + if (num_available_frames < packet_size_frames_) { |
|
tommi (sloooow) - chröme
2011/11/07 11:47:03
nit: remove {}
henrika (OOO until Aug 14)
2011/11/08 12:18:37
Done.
|
| + continue; |
| + } |
| + |
| + // Derive the number of packets we need to get from the client to |
| + // fill up the available area in the endpoint buffer. |
| + size_t num_packets = (num_available_frames / packet_size_frames_); |
| + |
| + // Get data from the client/source. |
| + for (size_t n = 0; n < num_packets; n++) { |
|
tommi (sloooow) - chröme
2011/11/07 11:47:03
++n
henrika (OOO until Aug 14)
2011/11/08 12:18:37
Done.
|
| + // Grab all available space in the rendering endpoint buffer |
| + // into which the client can write a data packet. |
| + hr = audio_render_client_->GetBuffer(packet_size_frames_, |
| + &audio_data); |
| + |
| + // Derive the audio delay which corresponds to the delay between |
| + // a render event and the time when the first audio sample in a |
| + // packet is played out through the speaker. This delay value |
| + // can typically be utilized by an acoustic echo-control (AEC) |
| + // unit at the render side. |
| + if (SUCCEEDED(hr) && audio_data) { |
|
Raymond Toy (Google)
2011/11/03 22:49:56
What happens if GetBuffer doesn't succeed? Is the
henrika (OOO until Aug 14)
2011/11/04 11:26:15
It is an extremely rare event and I have not been
tommi (sloooow) - chröme
2011/11/07 11:47:03
if it does fail though, should error be set to tru
henrika (OOO until Aug 14)
2011/11/08 12:18:37
Done.
|
| + UINT64 position = 0; |
| + int audio_delay_bytes = 0; |
| + hr = audio_clock->GetPosition(&position, NULL); |
| + if (SUCCEEDED(hr)) { |
|
Raymond Toy (Google)
2011/11/03 22:49:56
Same thing. What happens if GetPosition doesn't s
henrika (OOO until Aug 14)
2011/11/04 11:26:15
The user will receive 0 as audio delay and can see
tommi (sloooow) - chröme
2011/11/07 11:47:03
and set error to true?
henrika (OOO until Aug 14)
2011/11/08 12:18:37
I don't want to stop rendering even if delay estim
|
| + // Stream position of the sample that is currently playing |
| + // through the speaker. |
| + double pos_sample_playing_frames = format_.nSamplesPerSec * |
| + (static_cast<double>(position) / device_frequency); |
| + |
| + // Stream position of the last sample written to the endpoint |
| + // buffer. Note that the packet we are about to receive in |
| + // the upcoming callback is also included. |
| + size_t pos_last_sample_written_frames = |
| + num_written_frames_ + packet_size_frames_; |
| + |
| + // Derive the actual delay value which will be fed to the |
| + // render client using the OnMoreData() callback. |
| + audio_delay_bytes = (pos_last_sample_written_frames - |
| + pos_sample_playing_frames) * frame_size_; |
| + } |
| + |
| + // Read a data packet from the registered client source and |
| + // deliver a delay estimate in the same callback to the client. |
| + // A time stamp is also stored in the AudioBuffersState. This |
| + // time stamp can be used at the client side to compensate for |
| + // the delay between the usage of the delay value and the time |
| + // of generation. |
| + uint32 num_filled_bytes = source_->OnMoreData( |
| + this, audio_data, packet_size_bytes_, |
| + AudioBuffersState(0, audio_delay_bytes)); |
| + |
| + // Perform in-place, software-volume adjustments. |
| + media::AdjustVolume(audio_data, |
| + num_filled_bytes, |
| + format_.nChannels, |
| + format_.wBitsPerSample >> 3, |
| + volume_); |
| + |
| + // Zero out the part of the packet which has not been filled by |
| + // the client. |
| + if (num_filled_bytes < packet_size_bytes_) { |
| + memset(&audio_data[num_filled_bytes], |
| + 0, |
|
tommi (sloooow) - chröme
2011/11/07 11:47:03
move to the line above
henrika (OOO until Aug 14)
2011/11/08 12:18:37
Done.
|
| + (packet_size_bytes_ - num_filled_bytes)); |
|
Niklas Enbom
2011/11/03 15:46:47
Why, since silence is better than repeat in case o
henrika (OOO until Aug 14)
2011/11/04 11:26:15
Do you mean that you would like me to add better c
henrika (OOO until Aug 14)
2011/11/07 12:37:40
Modified the comment.
|
| + } |
| + } |
| + |
| + // Release the buffer space acquired in the GetBuffer() call. |
| + DWORD flags(0); |
|
tommi (sloooow) - chröme
2011/11/07 11:47:03
flags = 0
henrika (OOO until Aug 14)
2011/11/08 12:18:37
Done.
|
| + hr = audio_render_client_->ReleaseBuffer(packet_size_frames_, |
| + flags); |
| + |
| + num_written_frames_ += packet_size_frames_; |
| + } |
| + } |
| + break; |
| + default: |
| + error = true; |
| + break; |
| + } |
| + } |
| + |
| + if (playing && error) { |
| + // TODO(henrika): perhaps it is worth improving the cleanup here by e.g. |
| + // stopping the audio client, joining the thread etc.? |
| + NOTREACHED() << "WASAPI rendering failed with error code " |
|
tommi (sloooow) - chröme
2011/11/07 11:47:03
PLOG(ERROR)?
NOTREACHED is a debug-only assert for
henrika (OOO until Aug 14)
2011/11/08 12:18:37
Thanks. Improved this section.
|
| + << GetLastError(); |
| + } |
| + |
| + // Disable MMCSS. |
| + if (mm_task && !avrt::AvRevertMmThreadCharacteristics(mm_task)) { |
| + PLOG(WARNING) << "Failed to disable MMCSS"; |
| + } |
| +} |
| + |
| +void WASAPIAudioOutputStream::HandleError(HRESULT err) { |
| + NOTREACHED() << "Error code: " << std::hex << err; |
| + if (source_) |
| + source_->OnError(this, static_cast<int>(err)); |
| +} |
| + |
| +HRESULT WASAPIAudioOutputStream::SetRenderDevice(ERole device_role) { |
| + ScopedComPtr<IMMDeviceEnumerator> enumerator; |
|
tommi (sloooow) - chröme
2011/11/07 11:47:03
Assert called on correct thread?
henrika (OOO until Aug 14)
2011/11/08 12:18:37
Not needed. Called from Open(), DCHECK() is done t
|
| + HRESULT hr = CoCreateInstance(__uuidof(MMDeviceEnumerator), |
| + NULL, |
| + CLSCTX_INPROC_SERVER, |
| + __uuidof(IMMDeviceEnumerator), |
| + enumerator.ReceiveVoid()); |
| + if (SUCCEEDED(hr)) { |
| + // Retrieve the default render audio endpoint for the specified role. |
| + // Note that, in Windows Vista, the MMDevice API supports device roles |
| + // but the system-supplied user interface programs do not. |
| + hr = enumerator->GetDefaultAudioEndpoint(eRender, |
| + device_role, |
| + endpoint_device_.Receive()); |
| + |
| + // Verify that the audio endpoint device is active. That is, the audio |
| + // adapter that connects to the endpoint device is present and enabled. |
| + DWORD state = DEVICE_STATE_DISABLED; |
| + hr = endpoint_device_->GetState(&state); |
| + if (SUCCEEDED(hr)) { |
| + if (!(state & DEVICE_STATE_ACTIVE)) { |
| + DLOG(ERROR) << "Selected render device is not active."; |
| + hr = E_ACCESSDENIED; |
| + } |
| + } |
| + } |
|
tommi (sloooow) - chröme
2011/11/07 11:47:03
else log error + hr?
henrika (OOO until Aug 14)
2011/11/08 12:18:37
The caller does this as a result of hr. Should be
|
| + |
| + return hr; |
|
Raymond Toy (Google)
2011/11/03 22:49:56
hr can contain the error code from up to 3 differe
henrika (OOO until Aug 14)
2011/11/07 12:37:40
The Open() call consists of five sub functions wher
|
| +} |
| + |
| +HRESULT WASAPIAudioOutputStream::ActivateRenderDevice() { |
| + // Creates and activates an IAudioClient COM object given the selected |
| + // render endpoint device. |
| + HRESULT hr = endpoint_device_->Activate(__uuidof(IAudioClient), |
| + CLSCTX_INPROC_SERVER, |
| + NULL, |
| + audio_client_.ReceiveVoid()); |
| + return hr; |
| +} |
| + |
| +HRESULT WASAPIAudioOutputStream::GetAudioEngineStreamFormat() { |
| + // Retrieve the stream format that the audio engine uses for its internal |
| + // processing/mixing of shared-mode streams. |
| + return audio_client_->GetMixFormat(&audio_engine_mix_format_); |
| +} |
| + |
| +bool WASAPIAudioOutputStream::DesiredFormatIsSupported() { |
| + // In shared mode, the audio engine always supports the mix format, |
| + // which is stored in the |audio_engine_mix_format_| member. In addition, |
| + // the audio engine *might* support similar formats that have the same |
| + // sample rate and number of channels as the mix format but differ in |
| + // the representation of audio sample values. |
| + base::win::ScopedCoMem<WAVEFORMATEX> closest_match; |
| + HRESULT hr = audio_client_->IsFormatSupported(AUDCLNT_SHAREMODE_SHARED, |
| + &format_, |
| + &closest_match); |
| + DLOG_IF(ERROR, hr == S_FALSE) << "Format is not supported " |
| + << "but a closest match exists."; |
| + return (hr == S_OK); |
| +} |
| + |
| +HRESULT WASAPIAudioOutputStream::InitializeAudioEngine() { |
| + // TODO(henrika): this buffer scheme is still under development. |
| + // The exact details are yet to be determined based on tests with different |
| + // audio clients. |
| + int glitch_free_buffer_size_ms = static_cast<int>(packet_size_ms_ + 0.5); |
| + if (audio_engine_mix_format_->nSamplesPerSec == 48000) { |
| + // Initial tests have shown that we have to add 10 ms extra to |
| + // ensure that we don't run empty for any packet size. |
| + glitch_free_buffer_size_ms += 10; |
| + } else if (audio_engine_mix_format_->nSamplesPerSec == 44100) { |
| + // Initial tests have shown that we have to add 20 ms extra to |
| + // ensure that we don't run empty for any packet size. |
| + glitch_free_buffer_size_ms += 20; |
| + } else { |
| + glitch_free_buffer_size_ms += 20; |
| + } |
| + DVLOG(1) << "glitch_free_buffer_size_ms: " << glitch_free_buffer_size_ms; |
| + REFERENCE_TIME requested_buffer_duration_hns = |
| + static_cast<REFERENCE_TIME>(glitch_free_buffer_size_ms * 10000); |
|
tommi (sloooow) - chröme
2011/11/07 11:47:03
indent
henrika (OOO until Aug 14)
2011/11/08 12:18:37
Done.
|
| + |
| + // Initialize the audio stream between the client and the device. |
| + // We connect indirectly through the audio engine by using shared mode |
| + // and WASAPI is initialized in an event-driven mode. |
| + // Note that this API ensures that the buffer is never smaller than the |
| + // minimum buffer size needed to ensure glitch-free rendering. |
| + // If we request a buffer size that is smaller than the audio engine's |
| + // minimum required buffer size, the method sets the buffer size to this |
| + // minimum buffer size rather than to the buffer size requested. |
| + HRESULT hr = audio_client_->Initialize(AUDCLNT_SHAREMODE_SHARED, |
| + AUDCLNT_STREAMFLAGS_EVENTCALLBACK | |
| + AUDCLNT_STREAMFLAGS_NOPERSIST, |
| + requested_buffer_duration_hns, |
| + 0, |
| + &format_, |
| + NULL); |
| + if (FAILED(hr)) |
| + return hr; |
| + |
| + // Retrieve the length of the endpoint buffer shared between the client |
| + // and the audio engine. The buffer length determines |
| + // the maximum amount of rendering data that the client can write to |
| + // the endpoint buffer during a single processing pass. |
| + // A typical value is 960 audio frames <=> 20ms @ 48kHz sample rate. |
| + hr = audio_client_->GetBufferSize(&endpoint_buffer_size_frames_); |
| + if (FAILED(hr)) |
| + return hr; |
| + DVLOG(1) << "endpoint buffer size: " << endpoint_buffer_size_frames_ |
| + << " [frames]"; |
| +#ifndef NDEBUG |
| + // The period between processing passes by the audio engine is fixed for a |
| + // particular audio endpoint device and represents the smallest processing |
| + // quantum for the audio engine. This period plus the stream latency between |
| + // the buffer and endpoint device represents the minimum possible latency |
| + // that an audio application can achieve. |
| + REFERENCE_TIME device_period_shared_mode = 0; |
| + REFERENCE_TIME device_period_exclusive_mode = 0; |
| + HRESULT hr_dbg = audio_client_->GetDevicePeriod( |
| + &device_period_shared_mode, &device_period_exclusive_mode); |
| + if (SUCCEEDED(hr_dbg)) { |
| + DVLOG(1) << "device period: " |
| + << static_cast<double>(device_period_shared_mode / 10000.0) |
| + << " [ms]"; |
| + } |
| + |
| + REFERENCE_TIME latency = 0; |
| + hr_dbg = audio_client_->GetStreamLatency(&latency); |
| + if (SUCCEEDED(hr_dbg)) { |
| + DVLOG(1) << "stream latency: " << static_cast<double>(latency / 10000.0) |
| + << " [ms]"; |
| + } |
| +#endif |
| + |
| + // Set the event handle that the audio engine will signal each time |
| + // a buffer becomes ready to be processed by the client. |
| + hr = audio_client_->SetEventHandle(audio_samples_render_event_.Get()); |
| + if (FAILED(hr)) |
| + return hr; |
| + |
| + // Get access to the IAudioRenderClient interface. This interface |
| + // enables us to write output data to a rendering endpoint buffer. |
| + // The methods in this interface manage the movement of data packets |
| + // that contain audio-rendering data. |
| + hr = audio_client_->GetService(__uuidof(IAudioRenderClient), |
| + audio_render_client_.ReceiveVoid()); |
| + return hr; |
|
Raymond Toy (Google)
2011/11/03 22:49:56
As above, hr can be the result of several differen
henrika (OOO until Aug 14)
2011/11/07 12:37:40
Failure is enough. The HRESULT code will contain t
|
| +} |
| Property changes on: media\audio\win\audio_low_latency_output_win.cc |
| ___________________________________________________________________ |
| Added: svn:eol-style |
| + LF |