Chromium Code Reviews| Index: media/audio/win/audio_low_latency_output_win.cc |
| diff --git a/media/audio/win/audio_low_latency_output_win.cc b/media/audio/win/audio_low_latency_output_win.cc |
| index 847a5a2b029bc08dbf0999fa852098b82053576e..1baa15a51da7fbe18b782404e7413414b57989e7 100644 |
| --- a/media/audio/win/audio_low_latency_output_win.cc |
| +++ b/media/audio/win/audio_low_latency_output_win.cc |
| @@ -6,6 +6,8 @@ |
| #include <Functiondiscoverykeys_devpkey.h> |
| +#include <climits> |
| + |
| #include "base/command_line.h" |
| #include "base/logging.h" |
| #include "base/macros.h" |
| @@ -18,6 +20,7 @@ |
| #include "media/audio/win/audio_manager_win.h" |
| #include "media/audio/win/avrt_wrapper_win.h" |
| #include "media/audio/win/core_audio_util_win.h" |
| +#include "media/base/audio_sample_types.h" |
| #include "media/base/limits.h" |
| #include "media/base/media_switches.h" |
| @@ -71,8 +74,7 @@ WASAPIAudioOutputStream::WASAPIAudioOutputStream(AudioManagerWin* manager, |
| device_role_(device_role), |
| share_mode_(GetShareMode()), |
| num_written_frames_(0), |
| - source_(NULL), |
| - audio_bus_(AudioBus::Create(params)) { |
| + source_(NULL) { |
| DCHECK(manager_); |
| // The empty string is used to indicate a default device and the |
| @@ -89,6 +91,15 @@ WASAPIAudioOutputStream::WASAPIAudioOutputStream(AudioManagerWin* manager, |
| bool avrt_init = avrt::Initialize(); |
| DCHECK(avrt_init) << "Failed to load the avrt.dll"; |
| + // New set of parameters appropriate for float output. |
| + AudioParameters floatParams = AudioParameters(AudioParameters( |
|
DaleCurtis
2016/11/07 18:54:00
float_params also you're constructing three audio
Raymond Toy
2016/11/07 19:03:20
Whoa! Don't know how that happened!
|
| + params.format(), params.channel_layout(), params.sample_rate(), |
| + // Ignore the given bits per sample because we're outputting |
| + // floats. |
| + sizeof(float) * CHAR_BIT, params.frames_per_buffer())); |
| + |
| + audio_bus_ = AudioBus::Create(floatParams); |
| + |
| // Set up the desired render format specified by the client. We use the |
| // WAVE_FORMAT_EXTENSIBLE structure to ensure that multiple channel ordering |
| // and high precision data can be supported. |
| @@ -96,27 +107,27 @@ WASAPIAudioOutputStream::WASAPIAudioOutputStream(AudioManagerWin* manager, |
| // Begin with the WAVEFORMATEX structure that specifies the basic format. |
| WAVEFORMATEX* format = &format_.Format; |
| format->wFormatTag = WAVE_FORMAT_EXTENSIBLE; |
| - format->nChannels = params.channels(); |
| - format->nSamplesPerSec = params.sample_rate(); |
| - format->wBitsPerSample = params.bits_per_sample(); |
| + format->nChannels = floatParams.channels(); |
| + format->nSamplesPerSec = floatParams.sample_rate(); |
| + format->wBitsPerSample = floatParams.bits_per_sample(); |
| format->nBlockAlign = (format->wBitsPerSample / 8) * format->nChannels; |
| format->nAvgBytesPerSec = format->nSamplesPerSec * format->nBlockAlign; |
| format->cbSize = sizeof(WAVEFORMATEXTENSIBLE) - sizeof(WAVEFORMATEX); |
| // Add the parts which are unique to WAVE_FORMAT_EXTENSIBLE. |
| - format_.Samples.wValidBitsPerSample = params.bits_per_sample(); |
| + format_.Samples.wValidBitsPerSample = floatParams.bits_per_sample(); |
| format_.dwChannelMask = CoreAudioUtil::GetChannelConfig(device_id, eRender); |
| - format_.SubFormat = KSDATAFORMAT_SUBTYPE_PCM; |
| + format_.SubFormat = KSDATAFORMAT_SUBTYPE_IEEE_FLOAT; |
| // Store size (in different units) of audio packets which we expect to |
| // get from the audio endpoint device in each render event. |
| - packet_size_frames_ = params.frames_per_buffer(); |
| - packet_size_bytes_ = params.GetBytesPerBuffer(); |
| + packet_size_frames_ = floatParams.frames_per_buffer(); |
| + packet_size_bytes_ = floatParams.GetBytesPerBuffer(); |
| DVLOG(1) << "Number of bytes per audio frame : " << format->nBlockAlign; |
| DVLOG(1) << "Number of audio frames per packet: " << packet_size_frames_; |
| DVLOG(1) << "Number of bytes per packet : " << packet_size_bytes_; |
| DVLOG(1) << "Number of milliseconds per packet: " |
| - << params.GetBufferDuration().InMillisecondsF(); |
| + << floatParams.GetBufferDuration().InMillisecondsF(); |
| // All events are auto-reset events and non-signaled initially. |
| @@ -544,13 +555,9 @@ bool WASAPIAudioOutputStream::RenderAudioFromSource(UINT64 device_frequency) { |
| uint32_t num_filled_bytes = frames_filled * format_.Format.nBlockAlign; |
| DCHECK_LE(num_filled_bytes, packet_size_bytes_); |
| - // Note: If this ever changes to output raw float the data must be |
| - // clipped and sanitized since it may come from an untrusted |
| - // source such as NaCl. |
| - const int bytes_per_sample = format_.Format.wBitsPerSample >> 3; |
| audio_bus_->Scale(volume_); |
| - audio_bus_->ToInterleaved( |
| - frames_filled, bytes_per_sample, audio_data); |
| + audio_bus_->ToInterleaved<Float32SampleTypeTraits>( |
| + frames_filled, reinterpret_cast<float*>(audio_data)); |
| // Release the buffer space acquired in the GetBuffer() call. |
| // Render silence if we were not able to fill up the buffer totally. |