Index: media/audio/win/audio_low_latency_input_win.cc
diff --git a/media/audio/win/audio_low_latency_input_win.cc b/media/audio/win/audio_low_latency_input_win.cc
index 7fde2322dc991814794b094da32bc744a20b7c94..c757774625a712ce7f5966ecc7a566d46dcc5611 100644
--- a/media/audio/win/audio_low_latency_input_win.cc
+++ b/media/audio/win/audio_low_latency_input_win.cc
@@ -16,14 +16,21 @@ using base::win::ScopedCOMInitializer;
 namespace media {
 
 WASAPIAudioInputStream::WASAPIAudioInputStream(
-    AudioManagerWin* manager, const AudioParameters& params,
+    AudioManagerWin* manager,
+    const AudioParameters& params,
     const std::string& device_id)
     : manager_(manager),
       capture_thread_(NULL),
       opened_(false),
       started_(false),
+      frame_size_(0),
+      packet_size_frames_(0),
+      packet_size_bytes_(0),
       endpoint_buffer_size_frames_(0),
+      effects_(params.effects()),
       device_id_(device_id),
+      perf_count_to_100ns_units_(0.0),
+      ms_to_frame_count_(0.0),
       sink_(NULL) {
   DCHECK(manager_);
 
@@ -67,8 +74,7 @@ WASAPIAudioInputStream::WASAPIAudioInputStream(
     perf_count_to_100ns_units_ =
         (10000000.0 / static_cast<double>(performance_frequency.QuadPart));
   } else {
-    LOG(ERROR) << "High-resolution performance counters are not supported.";
-    perf_count_to_100ns_units_ = 0.0;
+    DLOG(ERROR) << "High-resolution performance counters are not supported.";
   }
 }
 
@@ -238,25 +244,28 @@ double WASAPIAudioInputStream::GetVolume() {
 }
 
 // static
-int WASAPIAudioInputStream::HardwareSampleRate(
+AudioParameters WASAPIAudioInputStream::GetInputStreamParameters(
     const std::string& device_id) {
-  base::win::ScopedCoMem<WAVEFORMATEX> audio_engine_mix_format;
-  HRESULT hr = GetMixFormat(device_id, &audio_engine_mix_format);
-  if (FAILED(hr))
-    return 0;
-
-  return static_cast<int>(audio_engine_mix_format->nSamplesPerSec);
-}
+  int sample_rate = 48000;
+  ChannelLayout channel_layout = CHANNEL_LAYOUT_STEREO;
 
-// static
-uint32 WASAPIAudioInputStream::HardwareChannelCount(
-    const std::string& device_id) {
   base::win::ScopedCoMem<WAVEFORMATEX> audio_engine_mix_format;
-  HRESULT hr = GetMixFormat(device_id, &audio_engine_mix_format);
-  if (FAILED(hr))
-    return 0;
+  if (SUCCEEDED(GetMixFormat(device_id, &audio_engine_mix_format))) {
+    sample_rate = static_cast<int>(audio_engine_mix_format->nSamplesPerSec);
+    channel_layout = audio_engine_mix_format->nChannels == 1 ?
+        CHANNEL_LAYOUT_MONO : CHANNEL_LAYOUT_STEREO;
+  }
+
+  int effects = AudioParameters::NO_EFFECTS;
+  // For non-loopback devices, advertise that ducking is supported.
+  if (device_id != AudioManagerBase::kLoopbackInputDeviceId)
+    effects |= AudioParameters::DUCKING;
 
-  return static_cast<uint32>(audio_engine_mix_format->nChannels);
+  // Use 10ms frame size as default.
+  int frames_per_buffer = sample_rate / 100;
+  return AudioParameters(
+      AudioParameters::AUDIO_PCM_LOW_LATENCY, channel_layout, 0, sample_rate,
+      16, frames_per_buffer, effects);
 }
 
 // static
@@ -276,7 +285,7 @@ HRESULT WASAPIAudioInputStream::GetMixFormat(const std::string& device_id,
     hr = enumerator->GetDefaultAudioEndpoint(eCapture, eConsole,
                                              endpoint_device.Receive());
   } else if (device_id == AudioManagerBase::kLoopbackInputDeviceId) {
-    // Capture the default playback stream.
+    // Get the mix format of the default playback stream.
     hr = enumerator->GetDefaultAudioEndpoint(eRender, eConsole,
                                              endpoint_device.Receive());
   } else {
@@ -469,7 +478,15 @@ HRESULT WASAPIAudioInputStream::SetCaptureDevice() {
     // Retrieve the default capture audio endpoint for the specified role.
     // Note that, in Windows Vista, the MMDevice API supports device roles
     // but the system-supplied user interface programs do not.
-    hr = enumerator->GetDefaultAudioEndpoint(eCapture, eConsole,
+
+    // If the caller has requested to turn on ducking, we select the default
+    // communications device instead of the default capture device.
+    // This implicitly turns on ducking and allows the user to control the
+    // attenuation level.
+    ERole role = (effects_ & AudioParameters::DUCKING) ?
+        eCommunications : eConsole;
+
+    hr = enumerator->GetDefaultAudioEndpoint(eCapture, role,
                                              endpoint_device_.Receive());
   } else if (device_id_ == AudioManagerBase::kLoopbackInputDeviceId) {
     // Capture the default playback stream.
@@ -478,6 +495,10 @@ HRESULT WASAPIAudioInputStream::SetCaptureDevice() {
   } else {
     // Retrieve a capture endpoint device that is specified by an endpoint
     // device-identification string.
+    // TODO(tommi): Opt into ducking for non-default audio devices.
+    DLOG_IF(WARNING, effects_ & AudioParameters::DUCKING)
| + << "Ducking has been requested for a non-default device."
|
| + "Not implemented.";
|
     hr = enumerator->GetDevice(base::UTF8ToUTF16(device_id_).c_str(),
                                endpoint_device_.Receive());
   }
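
Reviewer note, not part of the patch: the constructor hunk keeps the QueryPerformanceCounter conversion factor. Since one second contains 10,000,000 100-ns units, dividing that constant by the counter frequency (ticks per second) yields 100-ns units per tick; if high-resolution counters are unavailable, the factor simply stays at the 0.0 default now set in the initializer list. A minimal standalone sketch of the same computation (plain Win32, assumed names, not Chromium code):

    // Sketch only: QPC ticks -> 100-ns units, mirroring perf_count_to_100ns_units_.
    #include <windows.h>
    #include <cstdio>

    int main() {
      double perf_count_to_100ns_units = 0.0;  // Stays 0.0 if QPC is unavailable.
      LARGE_INTEGER frequency = {};
      if (QueryPerformanceFrequency(&frequency) && frequency.QuadPart != 0) {
        // 10,000,000 (100-ns units per second) / (ticks per second)
        //   = 100-ns units per tick.
        perf_count_to_100ns_units =
            10000000.0 / static_cast<double>(frequency.QuadPart);
      } else {
        std::printf("High-resolution performance counters are not supported.\n");
        return 1;
      }

      LARGE_INTEGER now = {};
      QueryPerformanceCounter(&now);
      std::printf("Now: %.0f (100-ns units since an arbitrary epoch)\n",
                  static_cast<double>(now.QuadPart) * perf_count_to_100ns_units);
      return 0;
    }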
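Reviewer note, not part of the patch: the SetCaptureDevice() change works by asking MMDevice for the default *communications* capture endpoint (eCommunications) instead of the default console endpoint whenever AudioParameters::DUCKING is requested; per the comment in the patch, that role implicitly opts the stream into the system ducking experience. A minimal standalone sketch of the same role selection (hypothetical helper using WRL's ComPtr rather than Chromium's COM wrappers; COM must already be initialized on the calling thread):

    // Sketch only: mirrors the ERole selection added to SetCaptureDevice().
    #include <mmdeviceapi.h>
    #include <objbase.h>
    #include <wrl/client.h>

    using Microsoft::WRL::ComPtr;

    HRESULT GetDefaultCaptureEndpoint(bool ducking_requested,
                                      ComPtr<IMMDevice>* endpoint_device) {
      ComPtr<IMMDeviceEnumerator> enumerator;
      HRESULT hr = CoCreateInstance(__uuidof(MMDeviceEnumerator), nullptr,
                                    CLSCTX_INPROC_SERVER,
                                    IID_PPV_ARGS(&enumerator));
      if (FAILED(hr))
        return hr;

      // Ducking requested -> default communications device; otherwise the
      // default console capture device, as in the patch.
      const ERole role = ducking_requested ? eCommunications : eConsole;
      return enumerator->GetDefaultAudioEndpoint(eCapture, role,
                                                 endpoint_device->GetAddressOf());
    }

GetInputStreamParameters() advertises the DUCKING effect for every non-loopback device; at its 48 kHz fallback rate the 10 ms default buffer works out to 48000 / 100 = 480 frames per buffer.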