Index: media/audio/win/audio_low_latency_output_win.cc |
diff --git a/media/audio/win/audio_low_latency_output_win.cc b/media/audio/win/audio_low_latency_output_win.cc |
index 8ba88217ecc44a2454db1d6341376d4d5b14db9e..a10e67a46cbf2f9b541e68a24a163c5a2d691a24 100644 |
--- a/media/audio/win/audio_low_latency_output_win.cc |
+++ b/media/audio/win/audio_low_latency_output_win.cc |
@@ -71,9 +71,12 @@ WASAPIAudioOutputStream::WASAPIAudioOutputStream(AudioManagerWin* manager, |
ERole device_role) |
: creating_thread_id_(base::PlatformThread::CurrentId()), |
manager_(manager), |
+ format_(), |
opened_(false), |
audio_parameters_are_valid_(false), |
volume_(1.0), |
+ packet_size_frames_(0), |
+ packet_size_bytes_(0), |
endpoint_buffer_size_frames_(0), |
device_id_(device_id), |
device_role_(device_role), |
@@ -130,11 +133,11 @@ WASAPIAudioOutputStream::WASAPIAudioOutputStream(AudioManagerWin* manager, |
// get from the audio endpoint device in each render event. |
packet_size_frames_ = params.frames_per_buffer(); |
packet_size_bytes_ = params.GetBytesPerBuffer(); |
- packet_size_ms_ = (1000.0 * packet_size_frames_) / params.sample_rate(); |
VLOG(1) << "Number of bytes per audio frame : " << format->nBlockAlign; |
VLOG(1) << "Number of audio frames per packet: " << packet_size_frames_; |
VLOG(1) << "Number of bytes per packet : " << packet_size_bytes_; |
- VLOG(1) << "Number of milliseconds per packet: " << packet_size_ms_; |
+ VLOG(1) << "Number of milliseconds per packet: " |
+ << params.GetBufferDuration().InMillisecondsF(); |
// All events are auto-reset events and non-signaled initially. |
@@ -234,6 +237,13 @@ bool WASAPIAudioOutputStream::Open() { |
audio_client_ = audio_client; |
audio_render_client_ = audio_render_client; |
+ hr = audio_client_->GetService(__uuidof(IAudioClock), |
+ audio_clock_.ReceiveVoid()); |
+ if (FAILED(hr)) { |
+ LOG(ERROR) << "Failed to get IAudioClock service."; |
+ return false; |
+ } |
+ |
opened_ = true; |
return true; |
} |
@@ -251,6 +261,17 @@ void WASAPIAudioOutputStream::Start(AudioSourceCallback* callback) { |
source_ = callback; |
+ // Ensure that the endpoint buffer is prepared with silence. |
+ if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) { |
+ if (!CoreAudioUtil::FillRenderEndpointBufferWithSilence( |
+ audio_client_, audio_render_client_)) { |
+ LOG(ERROR) << "Failed to prepare endpoint buffers with silence."; |
+ callback->OnError(this); |
+ return; |
+ } |
+ } |
+ num_written_frames_ = endpoint_buffer_size_frames_; |
+ |
// Create and start the thread that will drive the rendering by waiting for |
// render events. |
render_thread_.reset( |
@@ -263,18 +284,6 @@ void WASAPIAudioOutputStream::Start(AudioSourceCallback* callback) { |
return; |
} |
- // Ensure that the endpoint buffer is prepared with silence. |
- if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) { |
- if (!CoreAudioUtil::FillRenderEndpointBufferWithSilence( |
- audio_client_, audio_render_client_)) { |
- LOG(ERROR) << "Failed to prepare endpoint buffers with silence."; |
- StopThread(); |
- callback->OnError(this); |
- return; |
- } |
- } |
- num_written_frames_ = endpoint_buffer_size_frames_; |
- |
// Start streaming data between the endpoint buffer and the audio engine. |
HRESULT hr = audio_client_->Start(); |
if (FAILED(hr)) { |
@@ -377,17 +386,9 @@ void WASAPIAudioOutputStream::Run() { |
audio_samples_render_event_ }; |
UINT64 device_frequency = 0; |
- // The IAudioClock interface enables us to monitor a stream's data |
- // rate and the current position in the stream. Allocate it before we |
- // start spinning. |
- ScopedComPtr<IAudioClock> audio_clock; |
- hr = audio_client_->GetService(__uuidof(IAudioClock), |
- audio_clock.ReceiveVoid()); |
- if (SUCCEEDED(hr)) { |
- // The device frequency is the frequency generated by the hardware clock in |
- // the audio device. The GetFrequency() method reports a constant frequency. |
- hr = audio_clock->GetFrequency(&device_frequency); |
- } |
+ // The device frequency is the frequency generated by the hardware clock in |
+ // the audio device. The GetFrequency() method reports a constant frequency. |
+ hr = audio_clock_->GetFrequency(&device_frequency); |
  error = FAILED(hr); |
-  PLOG_IF(ERROR, error) << "Failed to acquire IAudioClock interface: " |
-                        << std::hex << hr; |
+  PLOG_IF(ERROR, error) << "Failed to get audio device frequency: " |
+                        << std::hex << hr; |
@@ -408,7 +409,7 @@ void WASAPIAudioOutputStream::Run() { |
break; |
case WAIT_OBJECT_0 + 1: |
// |audio_samples_render_event_| has been set. |
- error = !RenderAudioFromSource(audio_clock, device_frequency); |
+ error = !RenderAudioFromSource(device_frequency); |
break; |
default: |
error = true; |
@@ -430,8 +431,7 @@ void WASAPIAudioOutputStream::Run() { |
} |
} |
-bool WASAPIAudioOutputStream::RenderAudioFromSource( |
- IAudioClock* audio_clock, UINT64 device_frequency) { |
+bool WASAPIAudioOutputStream::RenderAudioFromSource(UINT64 device_frequency) { |
TRACE_EVENT0("audio", "RenderAudioFromSource"); |
HRESULT hr = S_FALSE; |
@@ -503,7 +503,7 @@ bool WASAPIAudioOutputStream::RenderAudioFromSource( |
// unit at the render side. |
UINT64 position = 0; |
int audio_delay_bytes = 0; |
- hr = audio_clock->GetPosition(&position, NULL); |
+ hr = audio_clock_->GetPosition(&position, NULL); |
if (SUCCEEDED(hr)) { |
// Stream position of the sample that is currently playing |
// through the speaker. |