Index: media/audio/win/audio_low_latency_output_win.cc
diff --git a/media/audio/win/audio_low_latency_output_win.cc b/media/audio/win/audio_low_latency_output_win.cc
index 0988eab5db26b78ef94041a4d730c6b59d2f548e..c3cf9c0967fa53802677c8c22ac3028c5672caf8 100644
--- a/media/audio/win/audio_low_latency_output_win.cc
+++ b/media/audio/win/audio_low_latency_output_win.cc
@@ -73,9 +73,9 @@ WASAPIAudioOutputStream::WASAPIAudioOutputStream(AudioManagerWin* manager,
       audio_bus_(AudioBus::Create(params)) {
   DCHECK(manager_);
 
-  VLOG(1) << "WASAPIAudioOutputStream::WASAPIAudioOutputStream()";
-  VLOG_IF(1, share_mode_ == AUDCLNT_SHAREMODE_EXCLUSIVE)
-      << "Core Audio (WASAPI) EXCLUSIVE MODE is enabled.";
+  DVLOG(1) << "WASAPIAudioOutputStream::WASAPIAudioOutputStream()";
+  DVLOG_IF(1, share_mode_ == AUDCLNT_SHAREMODE_EXCLUSIVE)
+      << "Core Audio (WASAPI) EXCLUSIVE MODE is enabled.";
 
   // Load the Avrt DLL if not already loaded. Required to support MMCSS.
   bool avrt_init = avrt::Initialize();
@@ -104,10 +104,10 @@ WASAPIAudioOutputStream::WASAPIAudioOutputStream(AudioManagerWin* manager,
   // get from the audio endpoint device in each render event.
   packet_size_frames_ = params.frames_per_buffer();
   packet_size_bytes_ = params.GetBytesPerBuffer();
-  VLOG(1) << "Number of bytes per audio frame : " << format->nBlockAlign;
-  VLOG(1) << "Number of audio frames per packet: " << packet_size_frames_;
-  VLOG(1) << "Number of bytes per packet : " << packet_size_bytes_;
-  VLOG(1) << "Number of milliseconds per packet: "
+  DVLOG(1) << "Number of bytes per audio frame : " << format->nBlockAlign;
+  DVLOG(1) << "Number of audio frames per packet: " << packet_size_frames_;
+  DVLOG(1) << "Number of bytes per packet : " << packet_size_bytes_;
+  DVLOG(1) << "Number of milliseconds per packet: "
           << params.GetBufferDuration().InMillisecondsF();
 
   // All events are auto-reset events and non-signaled initially.
@@ -127,7 +127,7 @@ WASAPIAudioOutputStream::~WASAPIAudioOutputStream() {
 }
 
 bool WASAPIAudioOutputStream::Open() {
-  VLOG(1) << "WASAPIAudioOutputStream::Open()";
+  DVLOG(1) << "WASAPIAudioOutputStream::Open()";
   DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_);
   if (opened_)
     return true;
@@ -226,7 +226,7 @@ bool WASAPIAudioOutputStream::Open() {
 }
 
 void WASAPIAudioOutputStream::Start(AudioSourceCallback* callback) {
-  VLOG(1) << "WASAPIAudioOutputStream::Start()";
+  DVLOG(1) << "WASAPIAudioOutputStream::Start()";
   DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_);
   CHECK(callback);
   CHECK(opened_);
@@ -271,7 +271,7 @@ void WASAPIAudioOutputStream::Start(AudioSourceCallback* callback) {
 }
 
 void WASAPIAudioOutputStream::Stop() {
-  VLOG(1) << "WASAPIAudioOutputStream::Stop()";
+  DVLOG(1) << "WASAPIAudioOutputStream::Stop()";
   DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_);
   if (!render_thread_)
     return;
@@ -306,7 +306,7 @@ void WASAPIAudioOutputStream::Stop() {
 }
 
 void WASAPIAudioOutputStream::Close() {
-  VLOG(1) << "WASAPIAudioOutputStream::Close()";
+  DVLOG(1) << "WASAPIAudioOutputStream::Close()";
   DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_);
 
   // It is valid to call Close() before calling open or Start().
@@ -319,7 +319,7 @@ void WASAPIAudioOutputStream::Close() {
 }
 
 void WASAPIAudioOutputStream::SetVolume(double volume) {
-  VLOG(1) << "SetVolume(volume=" << volume << ")";
+  DVLOG(1) << "SetVolume(volume=" << volume << ")";
   float volume_float = static_cast<float>(volume);
   if (volume_float < 0.0f || volume_float > 1.0f) {
     return;
@@ -328,7 +328,7 @@ void WASAPIAudioOutputStream::SetVolume(double volume) {
 }
 
 void WASAPIAudioOutputStream::GetVolume(double* volume) {
-  VLOG(1) << "GetVolume()";
+  DVLOG(1) << "GetVolume()";
   *volume = static_cast<double>(volume_);
 }
 
@@ -538,7 +538,7 @@ HRESULT WASAPIAudioOutputStream::ExclusiveModeInitialization( |
event_handle != INVALID_HANDLE_VALUE); |
if (use_event) |
stream_flags |= AUDCLNT_STREAMFLAGS_EVENTCALLBACK; |
- VLOG(2) << "stream_flags: 0x" << std::hex << stream_flags; |
+ DVLOG(2) << "stream_flags: 0x" << std::hex << stream_flags; |
// Initialize the audio stream between the client and the device. |
// For an exclusive-mode stream that uses event-driven buffering, the |
@@ -561,7 +561,7 @@ HRESULT WASAPIAudioOutputStream::ExclusiveModeInitialization(
 
       UINT32 aligned_buffer_size = 0;
       client->GetBufferSize(&aligned_buffer_size);
-      VLOG(1) << "Use aligned buffer size instead: " << aligned_buffer_size;
+      DVLOG(1) << "Use aligned buffer size instead: " << aligned_buffer_size;
 
       // Calculate new aligned periodicity. Each unit of reference time
       // is 100 nanoseconds.
@@ -573,9 +573,9 @@ HRESULT WASAPIAudioOutputStream::ExclusiveModeInitialization(
       // at this stage but we bail out with an error code instead and
      // combine it with a log message which informs about the suggested
      // aligned buffer size which should be used instead.
-      VLOG(1) << "aligned_buffer_duration: "
-              << static_cast<double>(aligned_buffer_duration / 10000.0)
-              << " [ms]";
+      DVLOG(1) << "aligned_buffer_duration: "
+               << static_cast<double>(aligned_buffer_duration / 10000.0)
+               << " [ms]";
     } else if (hr == AUDCLNT_E_INVALID_DEVICE_PERIOD) {
       // We will get this error if we try to use a smaller buffer size than
       // the minimum supported size (usually ~3ms on Windows 7).
@@ -587,7 +587,7 @@ HRESULT WASAPIAudioOutputStream::ExclusiveModeInitialization(
   if (use_event) {
     hr = client->SetEventHandle(event_handle);
     if (FAILED(hr)) {
-      VLOG(1) << "IAudioClient::SetEventHandle: " << std::hex << hr;
+      DVLOG(1) << "IAudioClient::SetEventHandle: " << std::hex << hr;
       return hr;
     }
   }
@@ -595,12 +595,12 @@ HRESULT WASAPIAudioOutputStream::ExclusiveModeInitialization(
   UINT32 buffer_size_in_frames = 0;
   hr = client->GetBufferSize(&buffer_size_in_frames);
   if (FAILED(hr)) {
-    VLOG(1) << "IAudioClient::GetBufferSize: " << std::hex << hr;
+    DVLOG(1) << "IAudioClient::GetBufferSize: " << std::hex << hr;
     return hr;
   }
 
   *endpoint_buffer_size = buffer_size_in_frames;
-  VLOG(2) << "endpoint buffer size: " << buffer_size_in_frames;
+  DVLOG(2) << "endpoint buffer size: " << buffer_size_in_frames;
 
   return hr;
 }
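
The change above swaps Chromium's VLOG/VLOG_IF verbose-logging macros for their DVLOG/DVLOG_IF counterparts throughout the WASAPI output stream: VLOG statements are compiled into every build and only gated at runtime by the --v/--vmodule flags, while DVLOG statements are compiled out of release builds. Below is a minimal sketch of that distinction using simplified, hypothetical stand-in macros; the real definitions live in base/logging.h and are more involved.

// dvlog_sketch.cc -- hypothetical stand-ins for Chromium's VLOG/DVLOG
// macros, illustrating the build-time difference this patch relies on.
#include <iostream>

#ifdef NDEBUG
// Release build: the streaming expression sits in an unreachable else
// branch, so the compiler eliminates it and the log line costs nothing.
#define DVLOG(level) \
  if (true) {        \
  } else             \
    std::cerr
#else
// Debug build: DVLOG logs unconditionally, like VLOG.
#define DVLOG(level) std::cerr
#endif

// Stand-in for VLOG: compiled into every build. (The real macro also
// checks the runtime verbosity level before emitting anything.)
#define VLOG(level) std::cerr

int main() {
  VLOG(1) << "present in debug and release binaries\n";
  DVLOG(1) << "present in debug binaries only\n";
  return 0;
}

Under these stand-ins, as with the real macros, the verbose diagnostics in WASAPIAudioOutputStream disappear from release binaries instead of being carried, and checked at runtime, in every build.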