| Index: media/audio/win/audio_low_latency_output_win.cc
|
| diff --git a/media/audio/win/audio_low_latency_output_win.cc b/media/audio/win/audio_low_latency_output_win.cc
|
| index 847a5a2b029bc08dbf0999fa852098b82053576e..48332885e75e412a0d27f2cdc8e45eb3a006c2fd 100644
|
| --- a/media/audio/win/audio_low_latency_output_win.cc
|
| +++ b/media/audio/win/audio_low_latency_output_win.cc
|
| @@ -6,6 +6,8 @@
|
|
|
| #include <Functiondiscoverykeys_devpkey.h>
|
|
|
| +#include <climits>
|
| +
|
| #include "base/command_line.h"
|
| #include "base/logging.h"
|
| #include "base/macros.h"
|
| @@ -18,6 +20,7 @@
|
| #include "media/audio/win/audio_manager_win.h"
|
| #include "media/audio/win/avrt_wrapper_win.h"
|
| #include "media/audio/win/core_audio_util_win.h"
|
| +#include "media/base/audio_sample_types.h"
|
| #include "media/base/limits.h"
|
| #include "media/base/media_switches.h"
|
|
|
| @@ -71,8 +74,7 @@ WASAPIAudioOutputStream::WASAPIAudioOutputStream(AudioManagerWin* manager,
|
| device_role_(device_role),
|
| share_mode_(GetShareMode()),
|
| num_written_frames_(0),
|
| - source_(NULL),
|
| - audio_bus_(AudioBus::Create(params)) {
|
| + source_(NULL) {
|
| DCHECK(manager_);
|
|
|
| // The empty string is used to indicate a default device and the
|
| @@ -89,6 +91,15 @@ WASAPIAudioOutputStream::WASAPIAudioOutputStream(AudioManagerWin* manager,
|
| bool avrt_init = avrt::Initialize();
|
| DCHECK(avrt_init) << "Failed to load the avrt.dll";
|
|
|
| + // Create a new AudioParameters set that is appropriate for float output.
|
| + AudioParameters float_params(
|
| + params.format(), params.channel_layout(), params.sample_rate(),
|
| + // Ignore the given bits per sample because we're outputting
|
| + // floats.
|
| + sizeof(float) * CHAR_BIT, params.frames_per_buffer());
|
| +
|
| + audio_bus_ = AudioBus::Create(float_params);
|
| +
|
| // Set up the desired render format specified by the client. We use the
|
| // WAVE_FORMAT_EXTENSIBLE structure to ensure that multiple channel ordering
|
| // and high precision data can be supported.
|
| @@ -96,27 +107,27 @@ WASAPIAudioOutputStream::WASAPIAudioOutputStream(AudioManagerWin* manager,
|
| // Begin with the WAVEFORMATEX structure that specifies the basic format.
|
| WAVEFORMATEX* format = &format_.Format;
|
| format->wFormatTag = WAVE_FORMAT_EXTENSIBLE;
|
| - format->nChannels = params.channels();
|
| - format->nSamplesPerSec = params.sample_rate();
|
| - format->wBitsPerSample = params.bits_per_sample();
|
| + format->nChannels = float_params.channels();
|
| + format->nSamplesPerSec = float_params.sample_rate();
|
| + format->wBitsPerSample = float_params.bits_per_sample();
|
| format->nBlockAlign = (format->wBitsPerSample / 8) * format->nChannels;
|
| format->nAvgBytesPerSec = format->nSamplesPerSec * format->nBlockAlign;
|
| format->cbSize = sizeof(WAVEFORMATEXTENSIBLE) - sizeof(WAVEFORMATEX);
|
|
|
| // Add the parts which are unique to WAVE_FORMAT_EXTENSIBLE.
|
| - format_.Samples.wValidBitsPerSample = params.bits_per_sample();
|
| + format_.Samples.wValidBitsPerSample = float_params.bits_per_sample();
|
| format_.dwChannelMask = CoreAudioUtil::GetChannelConfig(device_id, eRender);
|
| - format_.SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
|
| + format_.SubFormat = KSDATAFORMAT_SUBTYPE_IEEE_FLOAT;
|
|
|
| // Store size (in different units) of audio packets which we expect to
|
| // get from the audio endpoint device in each render event.
|
| - packet_size_frames_ = params.frames_per_buffer();
|
| - packet_size_bytes_ = params.GetBytesPerBuffer();
|
| + packet_size_frames_ = float_params.frames_per_buffer();
|
| + packet_size_bytes_ = float_params.GetBytesPerBuffer();
|
| DVLOG(1) << "Number of bytes per audio frame : " << format->nBlockAlign;
|
| DVLOG(1) << "Number of audio frames per packet: " << packet_size_frames_;
|
| DVLOG(1) << "Number of bytes per packet : " << packet_size_bytes_;
|
| DVLOG(1) << "Number of milliseconds per packet: "
|
| - << params.GetBufferDuration().InMillisecondsF();
|
| + << float_params.GetBufferDuration().InMillisecondsF();
|
|
|
| // All events are auto-reset events and non-signaled initially.
|
|
|
| @@ -544,13 +555,9 @@ bool WASAPIAudioOutputStream::RenderAudioFromSource(UINT64 device_frequency) {
|
| uint32_t num_filled_bytes = frames_filled * format_.Format.nBlockAlign;
|
| DCHECK_LE(num_filled_bytes, packet_size_bytes_);
|
|
|
| - // Note: If this ever changes to output raw float the data must be
|
| - // clipped and sanitized since it may come from an untrusted
|
| - // source such as NaCl.
|
| - const int bytes_per_sample = format_.Format.wBitsPerSample >> 3;
|
| audio_bus_->Scale(volume_);
|
| - audio_bus_->ToInterleaved(
|
| - frames_filled, bytes_per_sample, audio_data);
|
| + audio_bus_->ToInterleaved<Float32SampleTypeTraits>(
|
| + frames_filled, reinterpret_cast<float*>(audio_data));
|
|
|
| // Release the buffer space acquired in the GetBuffer() call.
|
| // Render silence if we were not able to fill up the buffer totally.
|
|
|