Chromium Code Reviews

Side by Side Diff: media/audio/win/audio_low_latency_output_win.cc

Issue 10823100: Adds support for multi-channel output audio for the low-latency path in Windows. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Added more mixing cases and improved the tests Created 8 years, 4 months ago
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "media/audio/win/audio_low_latency_output_win.h" 5 #include "media/audio/win/audio_low_latency_output_win.h"
6 6
7 #include <Functiondiscoverykeys_devpkey.h> 7 #include <Functiondiscoverykeys_devpkey.h>
8 8
9 #include "base/command_line.h" 9 #include "base/command_line.h"
10 #include "base/logging.h" 10 #include "base/logging.h"
11 #include "base/memory/scoped_ptr.h" 11 #include "base/memory/scoped_ptr.h"
12 #include "base/utf_string_conversions.h" 12 #include "base/utf_string_conversions.h"
13 #include "media/audio/audio_util.h" 13 #include "media/audio/audio_util.h"
14 #include "media/audio/win/audio_manager_win.h" 14 #include "media/audio/win/audio_manager_win.h"
15 #include "media/audio/win/avrt_wrapper_win.h" 15 #include "media/audio/win/avrt_wrapper_win.h"
16 #include "media/base/media_switches.h" 16 #include "media/base/media_switches.h"
17 17
18 using base::win::ScopedComPtr; 18 using base::win::ScopedComPtr;
19 using base::win::ScopedCOMInitializer; 19 using base::win::ScopedCOMInitializer;
20 using base::win::ScopedCoMem; 20 using base::win::ScopedCoMem;
21 21
22 namespace media { 22 namespace media {
23 23
24 typedef uint32 ChannelConfig;
25
26 // Pack the layout structs below with 1-byte alignment so that no padding
27 // is inserted between their members.
28 #pragma pack(push)
29 #pragma pack(1)
30
31 struct LayoutMono_16bit {
32 int16 center;
33 };
34
35 struct LayoutStereo_16bit {
36 int16 left;
37 int16 right;
38 };
39
40 struct Layout7_1_16bit {
41 int16 front_left;
42 int16 front_right;
43 int16 front_center;
44 int16 low_frequency;
45 int16 back_left;
46 int16 back_right;
47 int16 side_left;
48 int16 side_right;
49 };
50
51 #pragma pack(pop)
52
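A compile-time size check could make the packing assumption explicit. This is only an illustrative sketch, not part of this patch; it assumes the COMPILE_ASSERT macro from base/basictypes.h is available in this translation unit:

// Verify that #pragma pack(1) removed all padding between the int16 members
// (assumes COMPILE_ASSERT from base/basictypes.h).
COMPILE_ASSERT(sizeof(LayoutMono_16bit) == 2, mono_layout_must_be_2_bytes);
COMPILE_ASSERT(sizeof(LayoutStereo_16bit) == 4, stereo_layout_must_be_4_bytes);
COMPILE_ASSERT(sizeof(Layout7_1_16bit) == 16, layout_7_1_must_be_16_bytes);
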
53 // Retrieves the stream format that the audio engine uses for its internal
54 // processing/mixing of shared-mode streams.
55 static HRESULT GetMixFormat(ERole device_role, WAVEFORMATEX** device_format) {
56 // Note that we are using the IAudioClient::GetMixFormat() API to get the
57 // device format in this function. It is in fact possible to be "more native",
58 // and ask the endpoint device directly for its properties. Given a reference
59 // to the IMMDevice interface of an endpoint object, a client can obtain a
60 // reference to the endpoint object's property store by calling the
61 // IMMDevice::OpenPropertyStore() method. However, I have not been able to
62 // access any valuable information using this method on my HP Z600 desktop,
63 // hence it feels more appropriate to use the IAudioClient::GetMixFormat()
64 // approach instead.
65
66 // Calling this function only makes sense for shared mode streams, since
67 // if the device will be opened in exclusive mode, then the application
68 // specified format is used instead. However, the result of this method can
69 // be useful for testing purposes so we don't DCHECK here.
70 DLOG_IF(WARNING, WASAPIAudioOutputStream::GetShareMode() ==
71 AUDCLNT_SHAREMODE_EXCLUSIVE) <<
72 "The mixing sample rate will be ignored for exclusive-mode streams.";
73
74 // It is assumed that this static method is called from a COM thread, i.e.,
75 // CoInitializeEx() is not called here again to avoid STA/MTA conflicts.
76 ScopedComPtr<IMMDeviceEnumerator> enumerator;
77 HRESULT hr = CoCreateInstance(__uuidof(MMDeviceEnumerator),
scherkus (not reviewing) 2012/08/02 17:14:14 remove extra space between = and CCI()
henrika (OOO until Aug 14) 2012/08/03 14:55:56 Done.
78 NULL,
79 CLSCTX_INPROC_SERVER,
80 __uuidof(IMMDeviceEnumerator),
81 enumerator.ReceiveVoid());
82 if (FAILED(hr)) {
83 NOTREACHED() << "error code: " << std::hex << hr;
84 return hr;
85 }
86
87 ScopedComPtr<IMMDevice> endpoint_device;
88 hr = enumerator->GetDefaultAudioEndpoint(eRender,
89 device_role,
90 endpoint_device.Receive());
91 if (FAILED(hr)) {
92 // This will happen if there's no audio output device found or available
93 // (e.g. some audio cards that have outputs will still report them as
94 // "not found" when no speaker is plugged into the output jack).
95 LOG(WARNING) << "No audio end point: " << std::hex << hr;
96 return hr;
97 }
98
99 ScopedComPtr<IAudioClient> audio_client;
100 hr = endpoint_device->Activate(__uuidof(IAudioClient),
101 CLSCTX_INPROC_SERVER,
102 NULL,
103 audio_client.ReceiveVoid());
104 DCHECK(SUCCEEDED(hr)) << "Failed to activate device: " << std::hex << hr;
105 if (SUCCEEDED(hr)) {
106 hr = audio_client->GetMixFormat(device_format);
107 DCHECK(SUCCEEDED(hr)) << "GetMixFormat: " << std::hex << hr;
108 }
109
110 return hr;
111 }
112
113 // Retrieves an integer mask which corresponds to the channel layout the
114 // audio engine uses for its internal processing/mixing of shared-mode
115 // streams. This mask indicates which channels are present in the multi-
116 // channel stream. The least significant bit corresponds with the Front Left
117 // speaker, the next least significant bit corresponds to the Front Right
118 // speaker, and so on, continuing in the order defined in KsMedia.h.
119 // See http://msdn.microsoft.com/en-us/library/windows/hardware/ff537083(v=vs.85).aspx
120 // for more details.
121 static ChannelConfig GetChannelConfig() {
122 // Use a WAVEFORMATEXTENSIBLE structure since it can specify both the
123 // number of channels and the mapping of channels to speakers for
124 // multichannel devices.
125 base::win::ScopedCoMem<WAVEFORMATPCMEX> format_ex;
126 HRESULT hr = GetMixFormat(
127 eConsole, reinterpret_cast<WAVEFORMATEX**>(&format_ex));
scherkus (not reviewing) 2012/08/02 17:14:14 indent 2 more spaces
henrika (OOO until Aug 14) 2012/08/03 14:55:56 Done.
128 if (FAILED(hr))
129 return 0;
130
131 // The dwChannelMask member specifies which channels are present in the
132 // multichannel stream. The least significant bit corresponds to the
133 // front left speaker, the next least significant bit corresponds to the
134 // front right speaker, and so on.
135 // See http://msdn.microsoft.com/en-us/library/windows/desktop/dd757714(v=vs.85).aspx
136 // for more details on the channel mapping.
137 DVLOG(2) << "dwChannelMask: 0x" << std::hex << format_ex->dwChannelMask;
138
139 #if !defined(NDEBUG)
140 // See http://en.wikipedia.org/wiki/Surround_sound for more details on
141 // how to name various speaker configurations. The list below is not complete.
142 const char* speaker_config("Undefined");
scherkus (not reviewing) 2012/08/02 17:14:14 () notation is a bit odd ... use = instead
henrika (OOO until Aug 14) 2012/08/03 14:55:56 Done.
143 switch (format_ex->dwChannelMask) {
144 case KSAUDIO_SPEAKER_MONO:
145 speaker_config = "Mono";
146 break;
147 case KSAUDIO_SPEAKER_STEREO:
148 speaker_config = "Stereo";
149 break;
150 case KSAUDIO_SPEAKER_5POINT1_SURROUND:
151 speaker_config = "5.1 surround";
152 break;
153 case KSAUDIO_SPEAKER_5POINT1:
scherkus (not reviewing) 2012/08/02 17:14:14 remove extra spaces
henrika (OOO until Aug 14) 2012/08/03 14:55:56 Done.
154 speaker_config = "5.1";
155 break;
156 case KSAUDIO_SPEAKER_7POINT1_SURROUND:
scherkus (not reviewing) 2012/08/02 17:14:14 remove extra spaces
henrika (OOO until Aug 14) 2012/08/03 14:55:56 Done.
157 speaker_config = "7.1 surround";
158 break;
159 case KSAUDIO_SPEAKER_7POINT1:
160 speaker_config = "7.1";
161 break;
162 default:
163 break;
164 }
165 DVLOG(2) << "speaker configuration: " << speaker_config;
166 #endif
167
168 return static_cast<ChannelConfig>(format_ex->dwChannelMask);
169 }
170
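For illustration, the mask returned here relates directly to the channel count reported by the mix format: the count equals the number of bits set in the mask. A minimal sketch (the helper name below is hypothetical and not part of this change):

// Counts the speaker positions present in |config|; for PCM mix formats this
// should match WAVEFORMATEXTENSIBLE::Format.nChannels.
static int NumberOfChannelsInConfig(ChannelConfig config) {
  int channels = 0;
  for (; config != 0; config >>= 1)
    channels += (config & 1);
  return channels;
}

For example, KSAUDIO_SPEAKER_STEREO (0x3) yields 2 and KSAUDIO_SPEAKER_7POINT1_SURROUND (0x63F) yields 8.
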
171 // Converts Microsoft's channel configuration to ChannelLayout.
172 // This mapping is not perfect but the best we can do given the current
173 // ChannelLayout enumerator and the Windows-specific speaker configurations
174 // defined in ksmedia.h. Don't assume that the channel ordering in
175 // ChannelLayout is exactly the same as the Windows specific configuration.
176 // As an example: KSAUDIO_SPEAKER_7POINT1_SURROUND is mapped to
177 // CHANNEL_LAYOUT_7_1 but the positions of Back L, Back R and Side L, Side R
178 // speakers are different in these two definitions.
179 static ChannelLayout ChannelConfigToChannelLayout(ChannelConfig config) {
180 switch (config) {
181 case KSAUDIO_SPEAKER_DIRECTOUT:
182 return CHANNEL_LAYOUT_NONE;
183 case KSAUDIO_SPEAKER_MONO:
184 return CHANNEL_LAYOUT_MONO;
185 case KSAUDIO_SPEAKER_STEREO:
186 return CHANNEL_LAYOUT_STEREO;
187 case KSAUDIO_SPEAKER_QUAD:
188 return CHANNEL_LAYOUT_QUAD;
189 case KSAUDIO_SPEAKER_SURROUND:
190 return CHANNEL_LAYOUT_4_0;
191 case KSAUDIO_SPEAKER_5POINT1:
192 return CHANNEL_LAYOUT_5_1_BACK;
193 case KSAUDIO_SPEAKER_5POINT1_SURROUND:
194 return CHANNEL_LAYOUT_5_1;
195 case KSAUDIO_SPEAKER_7POINT1:
196 return CHANNEL_LAYOUT_7_1_WIDE;
197 case KSAUDIO_SPEAKER_7POINT1_SURROUND:
198 return CHANNEL_LAYOUT_7_1;
199 default:
200 DVLOG(1) << "Unsupported channel layout: " << config;
201 return CHANNEL_LAYOUT_UNSUPPORTED;
202 }
203 }
204
205 // mono/stereo -> N.1 up-mixing where N=out_channels-1.
206 // See http://www.w3.org/TR/webaudio/#UpMix-sub for details.
207 // TODO(henrika): use ChannelLayout for channel parameters.
208 // TODO(henrika): can we do this in-place by processing the samples in
209 // reverse order when sizeof(out) > sizeof(in) (upmixing)?
210 static int ChannelUpMix(void* input,
scherkus (not reviewing) 2012/08/02 17:14:14 man this function is going to get massive over time
henrika (OOO until Aug 14) 2012/08/03 14:55:56 I have ideas on how to make it smaller, yes. Have
henrika (OOO until Aug 14) 2012/08/03 15:18:34 More on the 8-bit case. I am actually not sure tha
211 void* output,
212 int in_channels,
213 int out_channels,
214 size_t number_of_input_bytes) {
215 DCHECK_GT(out_channels, in_channels);
216
217 const int kChannelRatio = out_channels / in_channels;
218
219 // 1 -> 2
220 if (in_channels == 1 && out_channels == 2) {
221 LayoutMono_16bit* in = reinterpret_cast<LayoutMono_16bit*>(input);
222 LayoutStereo_16bit* out = reinterpret_cast<LayoutStereo_16bit*>(output);
223 int number_of_input_mono_samples = (number_of_input_bytes >> 1);
224
225 // Copy same input mono sample to both output channels.
226 for (int i = 0; i < number_of_input_mono_samples; ++i) {
227 out->left = in->center;
228 out->right = in->center;
229 in++;
230 out++;
231 }
232
233 return (kChannelRatio * number_of_input_bytes);
234 }
235
236 // 1 -> 7.1
237 if (in_channels == 1 && out_channels == 8) {
238 LayoutMono_16bit* in = reinterpret_cast<LayoutMono_16bit*>(input);
239 Layout7_1_16bit* out = reinterpret_cast<Layout7_1_16bit*>(output);
240 int number_of_input_mono_samples = (number_of_input_bytes >> 1);
241
242 // Zero out all frames first.
243 memset(out, 0, number_of_input_mono_samples * sizeof(out[0]));
244
245 // Copy input sample to output center channel.
246 for (int i = 0; i < number_of_input_mono_samples; ++i) {
247 out->front_center = in->center;
248 in++;
249 out++;
250 }
251
252 return (kChannelRatio * number_of_input_bytes);
253 }
254
255 // 2 -> 7.1
256 if (in_channels == 2 && out_channels == 8) {
257 LayoutStereo_16bit* in = reinterpret_cast<LayoutStereo_16bit*>(input);
258 Layout7_1_16bit* out = reinterpret_cast<Layout7_1_16bit*>(output);
259 int number_of_input_stereo_samples = (number_of_input_bytes >> 2);
260
261 // Zero out all frames first.
262 memset(out, 0, number_of_input_stereo_samples * sizeof(out[0]));
263
264 // Copy left and right input channels to the same output channels.
265 for (int i = 0; i < number_of_input_stereo_samples; ++i) {
266 out->front_left = in->left;
267 out->front_right = in->right;
268 in++;
269 out++;
270 }
271
272 return (kChannelRatio * number_of_input_bytes);
273 }
274
275 LOG(ERROR) << "Up-mixing " << in_channels << "->"
276 << out_channels << " is not supported.";
277 return 0;
278 }
279
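As a sketch of the "make it smaller" idea raised in the review comments above (a hypothetical helper, not what this patch implements): every 16-bit case here copies each input channel into one fixed output slot and leaves the remaining output channels silent, so a single loop driven by a per-output-channel source table could replace the per-layout branches:

// Hypothetical generic 16-bit up-mix: |source_for_output[k]| names the input
// channel to copy into output channel k, or -1 to write silence. Covers the
// mono->stereo, mono->7.1 and stereo->7.1 cases above with one loop.
static int GenericUpMix16(const int16* input, int16* output,
                          int in_channels, int out_channels,
                          size_t number_of_input_bytes,
                          const int* source_for_output) {
  const size_t frames = number_of_input_bytes / (sizeof(int16) * in_channels);
  for (size_t frame = 0; frame < frames; ++frame) {
    for (int k = 0; k < out_channels; ++k) {
      const int source = source_for_output[k];
      output[frame * out_channels + k] =
          (source >= 0) ? input[frame * in_channels + source] : 0;
    }
  }
  // Number of output bytes written, matching ChannelUpMix()'s return value.
  return static_cast<int>(frames * out_channels * sizeof(int16));
}

With this shape, the 2 -> 7.1 case would use the table {0, 1, -1, -1, -1, -1, -1, -1} and mono -> stereo would use {0, 0}; supporting a new layout then only means adding a table.
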
24 // static 280 // static
25 AUDCLNT_SHAREMODE WASAPIAudioOutputStream::GetShareMode() { 281 AUDCLNT_SHAREMODE WASAPIAudioOutputStream::GetShareMode() {
26 const CommandLine* cmd_line = CommandLine::ForCurrentProcess(); 282 const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
27 if (cmd_line->HasSwitch(switches::kEnableExclusiveAudio)) 283 if (cmd_line->HasSwitch(switches::kEnableExclusiveAudio))
28 return AUDCLNT_SHAREMODE_EXCLUSIVE; 284 return AUDCLNT_SHAREMODE_EXCLUSIVE;
29 return AUDCLNT_SHAREMODE_SHARED; 285 return AUDCLNT_SHAREMODE_SHARED;
30 } 286 }
31 287
32 WASAPIAudioOutputStream::WASAPIAudioOutputStream(AudioManagerWin* manager, 288 WASAPIAudioOutputStream::WASAPIAudioOutputStream(AudioManagerWin* manager,
33 const AudioParameters& params, 289 const AudioParameters& params,
34 ERole device_role) 290 ERole device_role)
35 : com_init_(ScopedCOMInitializer::kMTA), 291 : com_init_(ScopedCOMInitializer::kMTA),
36 creating_thread_id_(base::PlatformThread::CurrentId()), 292 creating_thread_id_(base::PlatformThread::CurrentId()),
37 manager_(manager), 293 manager_(manager),
38 render_thread_(NULL), 294 render_thread_(NULL),
39 opened_(false), 295 opened_(false),
40 started_(false), 296 started_(false),
41 restart_rendering_mode_(false), 297 restart_rendering_mode_(false),
42 volume_(1.0), 298 volume_(1.0),
43 endpoint_buffer_size_frames_(0), 299 endpoint_buffer_size_frames_(0),
44 device_role_(device_role), 300 device_role_(device_role),
45 share_mode_(GetShareMode()), 301 share_mode_(GetShareMode()),
302 client_channel_count_(params.channels()),
46 num_written_frames_(0), 303 num_written_frames_(0),
47 source_(NULL) { 304 source_(NULL) {
48 CHECK(com_init_.succeeded()); 305 CHECK(com_init_.succeeded());
49 DCHECK(manager_); 306 DCHECK(manager_);
50 307
51 // Load the Avrt DLL if not already loaded. Required to support MMCSS. 308 // Load the Avrt DLL if not already loaded. Required to support MMCSS.
52 bool avrt_init = avrt::Initialize(); 309 bool avrt_init = avrt::Initialize();
53 DCHECK(avrt_init) << "Failed to load the avrt.dll"; 310 DCHECK(avrt_init) << "Failed to load the avrt.dll";
54 311
55 if (share_mode() == AUDCLNT_SHAREMODE_EXCLUSIVE) { 312 if (share_mode_ == AUDCLNT_SHAREMODE_EXCLUSIVE) {
56 VLOG(1) << ">> Note that EXCLUSIVE MODE is enabled <<"; 313 VLOG(1) << ">> Note that EXCLUSIVE MODE is enabled <<";
57 } 314 }
58 315
59 // Set up the desired render format specified by the client. 316 // Set up the desired render format specified by the client. We use the
60 format_.nSamplesPerSec = params.sample_rate(); 317 // WAVE_FORMAT_EXTENSIBLE structure to ensure that multiple channel ordering
61 format_.wFormatTag = WAVE_FORMAT_PCM; 318 // and high precision data can be supported.
62 format_.wBitsPerSample = params.bits_per_sample(); 319
63 format_.nChannels = params.channels(); 320 // Begin with the WAVEFORMATEX structure that specifies the basic format.
64 format_.nBlockAlign = (format_.wBitsPerSample / 8) * format_.nChannels; 321 WAVEFORMATEX* format = &format_.Format;
65 format_.nAvgBytesPerSec = format_.nSamplesPerSec * format_.nBlockAlign; 322 format->wFormatTag = WAVE_FORMAT_EXTENSIBLE;
66 format_.cbSize = 0; 323 format->nChannels = HardwareChannelCount();
324 format->nSamplesPerSec = params.sample_rate();
325 format->wBitsPerSample = params.bits_per_sample();
326 format->nBlockAlign = (format->wBitsPerSample / 8) * format->nChannels;
327 format->nAvgBytesPerSec = format->nSamplesPerSec * format->nBlockAlign;
328 format->cbSize = sizeof(WAVEFORMATEXTENSIBLE) - sizeof(WAVEFORMATEX);
329
330 // Add the parts which are unique to WAVE_FORMAT_EXTENSIBLE.
331 format_.Samples.wValidBitsPerSample = params.bits_per_sample();
332 format_.dwChannelMask = GetChannelConfig();
333 format_.SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
67 334
68 // Size in bytes of each audio frame. 335 // Size in bytes of each audio frame.
69 frame_size_ = format_.nBlockAlign; 336 frame_size_ = format->nBlockAlign;
337
338 // It is possible to set the number of channels in |params| to a lower value
339 // than we use as the internal number of audio channels when the audio stream
340 // is opened. If this mode (channel_factor_ > 1) is set, the native audio
341 // layer will expect a larger number of channels in the interleaved audio
342 // stream and a channel up-mix will be performed after the OnMoreData()
343 // callback to compensate for the lower number of channels provided by the
344 // audio source.
345 // Example: params.channels() is 2 and endpoint_channel_count() is 8 =>
346 // the audio stream is opened up in 7.1 surround mode but the source only
347 // provides a stereo signal as input, i.e., a stereo up-mix (2 -> 7.1) will
348 // take place before sending the stream to the audio driver.
349 DCHECK_GE(channel_factor(), 1) << "Unsupported channel count.";
350 DVLOG(1) << "client channels (in): " << params.channels();
351 DVLOG(1) << "channel factor: " << channel_factor();
70 352
71 // Store size (in different units) of audio packets which we expect to 353 // Store size (in different units) of audio packets which we expect to
72 // get from the audio endpoint device in each render event. 354 // get from the audio endpoint device in each render event.
73 packet_size_frames_ = params.GetBytesPerBuffer() / format_.nBlockAlign; 355 packet_size_frames_ =
74 packet_size_bytes_ = params.GetBytesPerBuffer(); 356 (channel_factor() * params.GetBytesPerBuffer()) / format->nBlockAlign;
357 packet_size_bytes_ = channel_factor() * params.GetBytesPerBuffer();
75 packet_size_ms_ = (1000.0 * packet_size_frames_) / params.sample_rate(); 358 packet_size_ms_ = (1000.0 * packet_size_frames_) / params.sample_rate();
76 DVLOG(1) << "Number of bytes per audio frame : " << frame_size_; 359 DVLOG(1) << "Number of bytes per audio frame : " << frame_size_;
77 DVLOG(1) << "Number of audio frames per packet: " << packet_size_frames_; 360 DVLOG(1) << "Number of audio frames per packet: " << packet_size_frames_;
78 DVLOG(1) << "Number of milliseconds per packet: " << packet_size_ms_; 361 DVLOG(1) << "Number of milliseconds per packet: " << packet_size_ms_;
79 362
80 // All events are auto-reset events and non-signaled initially. 363 // All events are auto-reset events and non-signaled initially.
81 364
82 // Create the event which the audio engine will signal each time 365 // Create the event which the audio engine will signal each time
83 // a buffer becomes ready to be processed by the client. 366 // a buffer becomes ready to be processed by the client.
84 audio_samples_render_event_.Set(CreateEvent(NULL, FALSE, FALSE, NULL)); 367 audio_samples_render_event_.Set(CreateEvent(NULL, FALSE, FALSE, NULL));
(...skipping 153 matching lines...)
238 hr = audio_client_->Reset(); 521 hr = audio_client_->Reset();
239 if (FAILED(hr)) { 522 if (FAILED(hr)) {
240 DLOG_IF(ERROR, hr != AUDCLNT_E_NOT_INITIALIZED) 523 DLOG_IF(ERROR, hr != AUDCLNT_E_NOT_INITIALIZED)
241 << "Failed to reset streaming: " << std::hex << hr; 524 << "Failed to reset streaming: " << std::hex << hr;
242 } 525 }
243 526
244 // Extra safety check to ensure that the buffers are cleared. 527 // Extra safety check to ensure that the buffers are cleared.
245 // If the buffers are not cleared correctly, the next call to Start() 528 // If the buffers are not cleared correctly, the next call to Start()
246 // would fail with AUDCLNT_E_BUFFER_ERROR at IAudioRenderClient::GetBuffer(). 529 // would fail with AUDCLNT_E_BUFFER_ERROR at IAudioRenderClient::GetBuffer().
247 // This check is only needed for shared-mode streams. 530
248 if (share_mode() == AUDCLNT_SHAREMODE_SHARED) { 531 if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) {
249 UINT32 num_queued_frames = 0; 532 UINT32 num_queued_frames = 0;
250 audio_client_->GetCurrentPadding(&num_queued_frames); 533 audio_client_->GetCurrentPadding(&num_queued_frames);
251 DCHECK_EQ(0u, num_queued_frames); 534 DCHECK_EQ(0u, num_queued_frames);
252 } 535 }
253 536
254 // Ensure that we don't quit the main thread loop immediately next 537 // Ensure that we don't quit the main thread loop immediately next
255 // time Start() is called. 538 // time Start() is called.
256 ResetEvent(stop_render_event_.Get()); 539 ResetEvent(stop_render_event_.Get());
257 540
258 started_ = false; 541 started_ = false;
(...skipping 27 matching lines...)
286 } 569 }
287 volume_ = volume_float; 570 volume_ = volume_float;
288 } 571 }
289 572
290 void WASAPIAudioOutputStream::GetVolume(double* volume) { 573 void WASAPIAudioOutputStream::GetVolume(double* volume) {
291 DVLOG(1) << "GetVolume()"; 574 DVLOG(1) << "GetVolume()";
292 *volume = static_cast<double>(volume_); 575 *volume = static_cast<double>(volume_);
293 } 576 }
294 577
295 // static 578 // static
579 int WASAPIAudioOutputStream::HardwareChannelCount() {
580 // Use a WAVEFORMATEXTENSIBLE structure since it can specify both the
581 // number of channels and the mapping of channels to speakers for
582 // multichannel devices.
583 base::win::ScopedCoMem<WAVEFORMATPCMEX> format_ex;
584 HRESULT hr = GetMixFormat(
585 eConsole, reinterpret_cast<WAVEFORMATEX**>(&format_ex));
586 if (FAILED(hr))
587 return 0;
588
589 // Number of channels in the stream. Corresponds to the number of bits
590 // set in the dwChannelMask.
591 DVLOG(1) << "endpoint channels (out): " << format_ex->Format.nChannels;
592
593 return static_cast<int>(format_ex->Format.nChannels);
594 }
595
596 // static
597 ChannelLayout WASAPIAudioOutputStream::HardwareChannelLayout() {
598 return ChannelConfigToChannelLayout(GetChannelConfig());
599 }
600
601 // static
296 int WASAPIAudioOutputStream::HardwareSampleRate(ERole device_role) { 602 int WASAPIAudioOutputStream::HardwareSampleRate(ERole device_role) {
297 // Calling this function only makes sense for shared mode streams, since 603 base::win::ScopedCoMem<WAVEFORMATEX> format;
298 // if the device will be opened in exclusive mode, then the application 604 HRESULT hr = GetMixFormat(device_role, &format);
299 // specified format is used instead. However, the result of this method can 605 if (FAILED(hr))
300 // be useful for testing purposes so we don't DCHECK here. 606 return 0;
301 DLOG_IF(WARNING, GetShareMode() == AUDCLNT_SHAREMODE_EXCLUSIVE) <<
302 "The mixing sample rate will be ignored for exclusive-mode streams.";
303 607
304 // It is assumed that this static method is called from a COM thread, i.e., 608 DVLOG(2) << "nSamplesPerSec: " << format->nSamplesPerSec;
305 // CoInitializeEx() is not called here again to avoid STA/MTA conflicts. 609 return static_cast<int>(format->nSamplesPerSec);
306 ScopedComPtr<IMMDeviceEnumerator> enumerator;
307 HRESULT hr = CoCreateInstance(__uuidof(MMDeviceEnumerator),
308 NULL,
309 CLSCTX_INPROC_SERVER,
310 __uuidof(IMMDeviceEnumerator),
311 enumerator.ReceiveVoid());
312 if (FAILED(hr)) {
313 NOTREACHED() << "error code: " << std::hex << hr;
314 return 0.0;
315 }
316
317 ScopedComPtr<IMMDevice> endpoint_device;
318 hr = enumerator->GetDefaultAudioEndpoint(eRender,
319 device_role,
320 endpoint_device.Receive());
321 if (FAILED(hr)) {
322 // This will happen if there's no audio output device found or available
323 // (e.g. some audio cards that have outputs will still report them as
324 // "not found" when no speaker is plugged into the output jack).
325 LOG(WARNING) << "No audio end point: " << std::hex << hr;
326 return 0.0;
327 }
328
329 ScopedComPtr<IAudioClient> audio_client;
330 hr = endpoint_device->Activate(__uuidof(IAudioClient),
331 CLSCTX_INPROC_SERVER,
332 NULL,
333 audio_client.ReceiveVoid());
334 if (FAILED(hr)) {
335 NOTREACHED() << "error code: " << std::hex << hr;
336 return 0.0;
337 }
338
339 // Retrieve the stream format that the audio engine uses for its internal
340 // processing of shared-mode streams.
341 base::win::ScopedCoMem<WAVEFORMATEX> audio_engine_mix_format;
342 hr = audio_client->GetMixFormat(&audio_engine_mix_format);
343 if (FAILED(hr)) {
344 NOTREACHED() << "error code: " << std::hex << hr;
345 return 0.0;
346 }
347
348 return static_cast<int>(audio_engine_mix_format->nSamplesPerSec);
349 } 610 }
350 611
351 void WASAPIAudioOutputStream::Run() { 612 void WASAPIAudioOutputStream::Run() {
352 ScopedCOMInitializer com_init(ScopedCOMInitializer::kMTA); 613 ScopedCOMInitializer com_init(ScopedCOMInitializer::kMTA);
353 614
354 // Increase the thread priority. 615 // Increase the thread priority.
355 render_thread_->SetThreadPriority(base::kThreadPriority_RealtimeAudio); 616 render_thread_->SetThreadPriority(base::kThreadPriority_RealtimeAudio);
356 617
357 // Enable MMCSS to ensure that this thread receives prioritized access to 618 // Enable MMCSS to ensure that this thread receives prioritized access to
358 // CPU resources. 619 // CPU resources.
(...skipping 60 matching lines...)
419 { 680 {
420 // |audio_samples_render_event_| has been set. 681 // |audio_samples_render_event_| has been set.
421 UINT32 num_queued_frames = 0; 682 UINT32 num_queued_frames = 0;
422 uint8* audio_data = NULL; 683 uint8* audio_data = NULL;
423 684
424 // Contains how much new data we can write to the buffer without 685 // Contains how much new data we can write to the buffer without
425 // the risk of overwriting previously written data that the audio 686 // the risk of overwriting previously written data that the audio
426 // engine has not yet read from the buffer. 687 // engine has not yet read from the buffer.
427 size_t num_available_frames = 0; 688 size_t num_available_frames = 0;
428 689
429 if (share_mode() == AUDCLNT_SHAREMODE_SHARED) { 690 if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) {
430 // Get the padding value which represents the amount of rendering 691 // Get the padding value which represents the amount of rendering
431 // data that is queued up to play in the endpoint buffer. 692 // data that is queued up to play in the endpoint buffer.
432 hr = audio_client_->GetCurrentPadding(&num_queued_frames); 693 hr = audio_client_->GetCurrentPadding(&num_queued_frames);
433 num_available_frames = 694 num_available_frames =
434 endpoint_buffer_size_frames_ - num_queued_frames; 695 endpoint_buffer_size_frames_ - num_queued_frames;
435 } else { 696 } else {
436 // While the stream is running, the system alternately sends one 697 // While the stream is running, the system alternately sends one
437 // buffer or the other to the client. This form of double buffering 698 // buffer or the other to the client. This form of double buffering
438 // is referred to as "ping-ponging". Each time the client receives 699 // is referred to as "ping-ponging". Each time the client receives
439 // a buffer from the system (triggers this event) the client must 700 // a buffer from the system (triggers this event) the client must
(...skipping 31 matching lines...)
471 // a render event and the time when the first audio sample in a 732 // a render event and the time when the first audio sample in a
472 // packet is played out through the speaker. This delay value 733 // packet is played out through the speaker. This delay value
473 // can typically be utilized by an acoustic echo-control (AEC) 734 // can typically be utilized by an acoustic echo-control (AEC)
474 // unit at the render side. 735 // unit at the render side.
475 UINT64 position = 0; 736 UINT64 position = 0;
476 int audio_delay_bytes = 0; 737 int audio_delay_bytes = 0;
477 hr = audio_clock->GetPosition(&position, NULL); 738 hr = audio_clock->GetPosition(&position, NULL);
478 if (SUCCEEDED(hr)) { 739 if (SUCCEEDED(hr)) {
479 // Stream position of the sample that is currently playing 740 // Stream position of the sample that is currently playing
480 // through the speaker. 741 // through the speaker.
481 double pos_sample_playing_frames = format_.nSamplesPerSec * 742 double pos_sample_playing_frames = format_.Format.nSamplesPerSec *
482 (static_cast<double>(position) / device_frequency); 743 (static_cast<double>(position) / device_frequency);
483 744
484 // Stream position of the last sample written to the endpoint 745 // Stream position of the last sample written to the endpoint
485 // buffer. Note that the packet we are about to receive in 746
486 // the upcoming callback is also included. 747 // the upcoming callback is also included.
487 size_t pos_last_sample_written_frames = 748 size_t pos_last_sample_written_frames =
488 num_written_frames_ + packet_size_frames_; 749 num_written_frames_ + packet_size_frames_;
489 750
490 // Derive the actual delay value which will be fed to the 751 // Derive the actual delay value which will be fed to the
491 // render client using the OnMoreData() callback. 752 // render client using the OnMoreData() callback.
492 audio_delay_bytes = (pos_last_sample_written_frames - 753 audio_delay_bytes = (pos_last_sample_written_frames -
493 pos_sample_playing_frames) * frame_size_; 754 pos_sample_playing_frames) * frame_size_;
494 } 755 }
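To illustrate the delay arithmetic with assumed numbers: for a 48 kHz, 8-channel, 16-bit stream (frame_size_ = 16 bytes), if the clock position corresponds to 4800 played frames while num_written_frames_ + packet_size_frames_ is 5760, then audio_delay_bytes = (5760 - 4800) * 16 = 15360 bytes, i.e. 960 frames or 20 ms of audio still queued ahead of the speaker.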
495 756
496 // Read a data packet from the registered client source and 757 // Read a data packet from the registered client source and
497 // deliver a delay estimate in the same callback to the client. 758 // deliver a delay estimate in the same callback to the client.
498 // A time stamp is also stored in the AudioBuffersState. This 759 // A time stamp is also stored in the AudioBuffersState. This
499 // time stamp can be used at the client side to compensate for 760 // time stamp can be used at the client side to compensate for
500 // the delay between the usage of the delay value and the time 761 // the delay between the usage of the delay value and the time
501 // of generation. 762 // of generation.
502 uint32 num_filled_bytes = source_->OnMoreData( 763
503 audio_data, packet_size_bytes_, 764 uint32 num_filled_bytes = 0;
504 AudioBuffersState(0, audio_delay_bytes)); 765
766 if (channel_factor() == 1) {
767 // Case I: no up-mixing.
768 num_filled_bytes = source_->OnMoreData(
769 audio_data, packet_size_bytes_,
770 AudioBuffersState(0, audio_delay_bytes));
771 } else {
772 // Case II: up-mixing.
773 const int audio_source_size_bytes =
774 packet_size_bytes_ / channel_factor();
775 scoped_array<uint8> buffer;
776 buffer.reset(new uint8[audio_source_size_bytes]);
777
778 num_filled_bytes = source_->OnMoreData(
779 buffer.get(), audio_source_size_bytes,
780 AudioBuffersState(0, audio_delay_bytes));
781
782 num_filled_bytes = ChannelUpMix(buffer.get(),
783 &audio_data[0],
784 client_channel_count_,
785 endpoint_channel_count(),
786 num_filled_bytes);
787 }
505 788
506 // Perform in-place, software-volume adjustments. 789 // Perform in-place, software-volume adjustments.
790 // TODO(henrika): it is possible to adjust the volume in the
791 // ChannelUpMix() function.
507 media::AdjustVolume(audio_data, 792 media::AdjustVolume(audio_data,
508 num_filled_bytes, 793 num_filled_bytes,
509 format_.nChannels, 794 endpoint_channel_count(),
510 format_.wBitsPerSample >> 3, 795 format_.Format.wBitsPerSample >> 3,
511 volume_); 796 volume_);
512 797
513 // Zero out the part of the packet which has not been filled by 798 // Zero out the part of the packet which has not been filled by
514 // the client. Using silence is the least bad option in this 799 // the client. Using silence is the least bad option in this
515 // situation. 800 // situation.
516 if (num_filled_bytes < packet_size_bytes_) { 801 if (num_filled_bytes < packet_size_bytes_) {
517 memset(&audio_data[num_filled_bytes], 0, 802 memset(&audio_data[num_filled_bytes], 0,
518 (packet_size_bytes_ - num_filled_bytes)); 803 (packet_size_bytes_ - num_filled_bytes));
519 } 804 }
520 805
521 // Release the buffer space acquired in the GetBuffer() call. 806 // Release the buffer space acquired in the GetBuffer() call.
522 DWORD flags = 0; 807 DWORD flags = 0;
523 audio_render_client_->ReleaseBuffer(packet_size_frames_, 808 audio_render_client_->ReleaseBuffer(packet_size_frames_,
524 flags); 809 flags);
525 810
526 num_written_frames_ += packet_size_frames_; 811 num_written_frames_ += packet_size_frames_;
527 } 812 }
528 } 813 }
(...skipping 69 matching lines...)
598 // Creates and activates an IAudioClient COM object given the selected 883 // Creates and activates an IAudioClient COM object given the selected
599 // render endpoint device. 884 // render endpoint device.
600 HRESULT hr = endpoint_device_->Activate(__uuidof(IAudioClient), 885 HRESULT hr = endpoint_device_->Activate(__uuidof(IAudioClient),
601 CLSCTX_INPROC_SERVER, 886 CLSCTX_INPROC_SERVER,
602 NULL, 887 NULL,
603 audio_client.ReceiveVoid()); 888 audio_client.ReceiveVoid());
604 if (SUCCEEDED(hr)) { 889 if (SUCCEEDED(hr)) {
605 // Retrieve the stream format that the audio engine uses for its internal 890 // Retrieve the stream format that the audio engine uses for its internal
606 // processing/mixing of shared-mode streams. 891 // processing/mixing of shared-mode streams.
607 audio_engine_mix_format_.Reset(NULL); 892 audio_engine_mix_format_.Reset(NULL);
608 hr = audio_client->GetMixFormat(&audio_engine_mix_format_); 893 hr = audio_client->GetMixFormat(
894 reinterpret_cast<WAVEFORMATEX**>(&audio_engine_mix_format_));
609 895
610 if (SUCCEEDED(hr)) { 896 if (SUCCEEDED(hr)) {
611 audio_client_ = audio_client; 897 audio_client_ = audio_client;
612 } 898 }
613 } 899 }
614 900
615 return hr; 901 return hr;
616 } 902 }
617 903
618 bool WASAPIAudioOutputStream::DesiredFormatIsSupported() { 904 bool WASAPIAudioOutputStream::DesiredFormatIsSupported() {
619 // Determine, before calling IAudioClient::Initialize(), whether the audio 905 // Determine, before calling IAudioClient::Initialize(), whether the audio
620 // engine supports a particular stream format. 906 // engine supports a particular stream format.
621 // In shared mode, the audio engine always supports the mix format, 907 // In shared mode, the audio engine always supports the mix format,
622 // which is stored in the |audio_engine_mix_format_| member and it is also 908 // which is stored in the |audio_engine_mix_format_| member and it is also
623 // possible to receive a proposed (closest) format if the current format is 909 // possible to receive a proposed (closest) format if the current format is
624 // not supported. 910 // not supported.
625 base::win::ScopedCoMem<WAVEFORMATEX> closest_match; 911 base::win::ScopedCoMem<WAVEFORMATEXTENSIBLE> closest_match;
626 HRESULT hr = audio_client_->IsFormatSupported(share_mode(), 912 HRESULT hr = audio_client_->IsFormatSupported(
627 &format_, 913 share_mode_, reinterpret_cast<WAVEFORMATEX*>(&format_),
scherkus (not reviewing) 2012/08/02 17:14:14 indent 2 more spaces
henrika (OOO until Aug 14) 2012/08/03 14:55:56 Done.
628 &closest_match); 914 reinterpret_cast<WAVEFORMATEX**>(&closest_match));
629 915
630 // This log can only be triggered for shared mode. 916 // This log can only be triggered for shared mode.
631 DLOG_IF(ERROR, hr == S_FALSE) << "Format is not supported " 917 DLOG_IF(ERROR, hr == S_FALSE) << "Format is not supported "
632 << "but a closest match exists."; 918 << "but a closest match exists.";
633 // This log can be triggered both for shared and exclusive modes. 919 // This log can be triggered both for shared and exclusive modes.
634 DLOG_IF(ERROR, hr == AUDCLNT_E_UNSUPPORTED_FORMAT) << "Unsupported format."; 920 DLOG_IF(ERROR, hr == AUDCLNT_E_UNSUPPORTED_FORMAT) << "Unsupported format.";
635 if (hr == S_FALSE) { 921 if (hr == S_FALSE) {
636 DVLOG(1) << "wFormatTag : " << closest_match->wFormatTag; 922 DVLOG(1) << "wFormatTag : " << closest_match->Format.wFormatTag;
637 DVLOG(1) << "nChannels : " << closest_match->nChannels; 923 DVLOG(1) << "nChannels : " << closest_match->Format.nChannels;
638 DVLOG(1) << "nSamplesPerSec: " << closest_match->nSamplesPerSec; 924 DVLOG(1) << "nSamplesPerSec: " << closest_match->Format.nSamplesPerSec;
639 DVLOG(1) << "wBitsPerSample: " << closest_match->wBitsPerSample; 925 DVLOG(1) << "wBitsPerSample: " << closest_match->Format.wBitsPerSample;
640 } 926 }
641 927
642 return (hr == S_OK); 928 return (hr == S_OK);
643 } 929 }
644 930
645 HRESULT WASAPIAudioOutputStream::InitializeAudioEngine() { 931 HRESULT WASAPIAudioOutputStream::InitializeAudioEngine() {
646 #if !defined(NDEBUG) 932 #if !defined(NDEBUG)
647 // The period between processing passes by the audio engine is fixed for a 933 // The period between processing passes by the audio engine is fixed for a
648 // particular audio endpoint device and represents the smallest processing 934 // particular audio endpoint device and represents the smallest processing
649 // quantum for the audio engine. This period plus the stream latency between 935 // quantum for the audio engine. This period plus the stream latency between
(...skipping 21 matching lines...)
671 DVLOG(1) << "stream latency: " << static_cast<double>(latency / 10000.0) 957 DVLOG(1) << "stream latency: " << static_cast<double>(latency / 10000.0)
672 << " [ms]"; 958 << " [ms]";
673 } 959 }
674 } 960 }
675 #endif 961 #endif
676 962
677 HRESULT hr = S_FALSE; 963 HRESULT hr = S_FALSE;
678 964
679 // Perform different initialization depending on if the device shall be 965 // Perform different initialization depending on if the device shall be
680 // opened in shared mode or in exclusive mode. 966 // opened in shared mode or in exclusive mode.
681 hr = (share_mode() == AUDCLNT_SHAREMODE_SHARED) ? 967 hr = (share_mode_ == AUDCLNT_SHAREMODE_SHARED) ?
682 SharedModeInitialization() : ExclusiveModeInitialization(); 968 SharedModeInitialization() : ExclusiveModeInitialization();
683 if (FAILED(hr)) { 969 if (FAILED(hr)) {
684 LOG(WARNING) << "IAudioClient::Initialize() failed: " << std::hex << hr; 970 LOG(WARNING) << "IAudioClient::Initialize() failed: " << std::hex << hr;
685 return hr; 971 return hr;
686 } 972 }
687 973
688 // Retrieve the length of the endpoint buffer. The buffer length represents 974 // Retrieve the length of the endpoint buffer. The buffer length represents
689 // the maximum amount of rendering data that the client can write to 975 // the maximum amount of rendering data that the client can write to
690 // the endpoint buffer during a single processing pass. 976 // the endpoint buffer during a single processing pass.
691 // A typical value is 960 audio frames <=> 20ms @ 48kHz sample rate. 977 // A typical value is 960 audio frames <=> 20ms @ 48kHz sample rate.
692 hr = audio_client_->GetBufferSize(&endpoint_buffer_size_frames_); 978 hr = audio_client_->GetBufferSize(&endpoint_buffer_size_frames_);
693 if (FAILED(hr)) 979 if (FAILED(hr))
694 return hr; 980 return hr;
695 DVLOG(1) << "endpoint buffer size: " << endpoint_buffer_size_frames_ 981 DVLOG(1) << "endpoint buffer size: " << endpoint_buffer_size_frames_
696 << " [frames]"; 982 << " [frames]";
697 983
698 // The buffer scheme for exclusive mode streams is not designed for max 984 // The buffer scheme for exclusive mode streams is not designed for max
699 // flexibility. We only allow a "perfect match" between the packet size set 985 // flexibility. We only allow a "perfect match" between the packet size set
700 // by the user and the actual endpoint buffer size. 986 // by the user and the actual endpoint buffer size.
701 if (share_mode() == AUDCLNT_SHAREMODE_EXCLUSIVE && 987 if (share_mode_ == AUDCLNT_SHAREMODE_EXCLUSIVE &&
702 endpoint_buffer_size_frames_ != packet_size_frames_) { 988 endpoint_buffer_size_frames_ != packet_size_frames_) {
703 hr = AUDCLNT_E_INVALID_SIZE; 989 hr = AUDCLNT_E_INVALID_SIZE;
704 DLOG(ERROR) << "AUDCLNT_E_INVALID_SIZE"; 990 DLOG(ERROR) << "AUDCLNT_E_INVALID_SIZE";
705 return hr; 991 return hr;
706 } 992 }
707 993
708 // Set the event handle that the audio engine will signal each time 994 // Set the event handle that the audio engine will signal each time
709 // a buffer becomes ready to be processed by the client. 995 // a buffer becomes ready to be processed by the client.
710 hr = audio_client_->SetEventHandle(audio_samples_render_event_.Get()); 996 hr = audio_client_->SetEventHandle(audio_samples_render_event_.Get());
711 if (FAILED(hr)) 997 if (FAILED(hr))
712 return hr; 998 return hr;
713 999
714 // Get access to the IAudioRenderClient interface. This interface 1000 // Get access to the IAudioRenderClient interface. This interface
715 // enables us to write output data to a rendering endpoint buffer. 1001 // enables us to write output data to a rendering endpoint buffer.
716 // The methods in this interface manage the movement of data packets 1002 // The methods in this interface manage the movement of data packets
717 // that contain audio-rendering data. 1003 // that contain audio-rendering data.
718 hr = audio_client_->GetService(__uuidof(IAudioRenderClient), 1004 hr = audio_client_->GetService(__uuidof(IAudioRenderClient),
719 audio_render_client_.ReceiveVoid()); 1005 audio_render_client_.ReceiveVoid());
720 return hr; 1006 return hr;
721 } 1007 }
722 1008
723 HRESULT WASAPIAudioOutputStream::SharedModeInitialization() { 1009 HRESULT WASAPIAudioOutputStream::SharedModeInitialization() {
724 DCHECK_EQ(share_mode(), AUDCLNT_SHAREMODE_SHARED); 1010 DCHECK_EQ(share_mode_, AUDCLNT_SHAREMODE_SHARED);
725 1011
726 // TODO(henrika): this buffer scheme is still under development. 1012 // TODO(henrika): this buffer scheme is still under development.
727 // The exact details are yet to be determined based on tests with different 1013 // The exact details are yet to be determined based on tests with different
728 // audio clients. 1014 // audio clients.
729 int glitch_free_buffer_size_ms = static_cast<int>(packet_size_ms_ + 0.5); 1015 int glitch_free_buffer_size_ms = static_cast<int>(packet_size_ms_ + 0.5);
730 if (audio_engine_mix_format_->nSamplesPerSec == 48000) { 1016 if (audio_engine_mix_format_->Format.nSamplesPerSec == 48000) {
731 // Initial tests have shown that we have to add 10 ms extra to 1017 // Initial tests have shown that we have to add 10 ms extra to
732 // ensure that we don't run empty for any packet size. 1018 // ensure that we don't run empty for any packet size.
733 glitch_free_buffer_size_ms += 10; 1019 glitch_free_buffer_size_ms += 10;
734 } else if (audio_engine_mix_format_->nSamplesPerSec == 44100) { 1020 } else if (audio_engine_mix_format_->Format.nSamplesPerSec == 44100) {
735 // Initial tests have shown that we have to add 20 ms extra to 1021 // Initial tests have shown that we have to add 20 ms extra to
736 // ensure that we don't run empty for any packet size. 1022 // ensure that we don't run empty for any packet size.
737 glitch_free_buffer_size_ms += 20; 1023 glitch_free_buffer_size_ms += 20;
738 } else { 1024 } else {
739 glitch_free_buffer_size_ms += 20; 1025 glitch_free_buffer_size_ms += 20;
740 } 1026 }
741 DVLOG(1) << "glitch_free_buffer_size_ms: " << glitch_free_buffer_size_ms; 1027 DVLOG(1) << "glitch_free_buffer_size_ms: " << glitch_free_buffer_size_ms;
742 REFERENCE_TIME requested_buffer_duration = 1028 REFERENCE_TIME requested_buffer_duration =
743 static_cast<REFERENCE_TIME>(glitch_free_buffer_size_ms * 10000); 1029 static_cast<REFERENCE_TIME>(glitch_free_buffer_size_ms * 10000);
744 1030
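As a worked example with assumed values: a 10 ms packet at 48 kHz gives glitch_free_buffer_size_ms = 20, so requested_buffer_duration = 20 * 10000 = 200,000 units of 100 ns, i.e. 20 ms.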
745 // Initialize the audio stream between the client and the device. 1031 // Initialize the audio stream between the client and the device.
746 // We connect indirectly through the audio engine by using shared mode 1032 // We connect indirectly through the audio engine by using shared mode
747 // and WASAPI is initialized in an event driven mode. 1033 // and WASAPI is initialized in an event driven mode.
748 // Note that this API ensures that the buffer is never smaller than the 1034 // Note that this API ensures that the buffer is never smaller than the
749 // minimum buffer size needed to ensure glitch-free rendering. 1035 // minimum buffer size needed to ensure glitch-free rendering.
750 // If we request a buffer size that is smaller than the audio engine's 1036
751 // minimum required buffer size, the method sets the buffer size to this 1037 // minimum required buffer size, the method sets the buffer size to this
752 // minimum buffer size rather than to the buffer size requested. 1038 // minimum buffer size rather than to the buffer size requested.
753 HRESULT hr = audio_client_->Initialize(AUDCLNT_SHAREMODE_SHARED, 1039 HRESULT hr = S_FALSE;
754 AUDCLNT_STREAMFLAGS_EVENTCALLBACK | 1040 hr = audio_client_->Initialize(AUDCLNT_SHAREMODE_SHARED,
755 AUDCLNT_STREAMFLAGS_NOPERSIST, 1041 AUDCLNT_STREAMFLAGS_EVENTCALLBACK |
756 requested_buffer_duration, 1042 AUDCLNT_STREAMFLAGS_NOPERSIST,
757 0, 1043 requested_buffer_duration,
758 &format_, 1044 0,
759 NULL); 1045 reinterpret_cast<WAVEFORMATEX*>(&format_),
1046 NULL);
760 return hr; 1047 return hr;
761 } 1048 }
762 1049
763 HRESULT WASAPIAudioOutputStream::ExclusiveModeInitialization() { 1050 HRESULT WASAPIAudioOutputStream::ExclusiveModeInitialization() {
764 DCHECK_EQ(share_mode(), AUDCLNT_SHAREMODE_EXCLUSIVE); 1051 DCHECK_EQ(share_mode_, AUDCLNT_SHAREMODE_EXCLUSIVE);
765 1052
766 float f = (1000.0 * packet_size_frames_) / format_.nSamplesPerSec; 1053 float f = (1000.0 * packet_size_frames_) / format_.Format.nSamplesPerSec;
767 REFERENCE_TIME requested_buffer_duration = 1054 REFERENCE_TIME requested_buffer_duration =
768 static_cast<REFERENCE_TIME>(f * 10000.0 + 0.5); 1055 static_cast<REFERENCE_TIME>(f * 10000.0 + 0.5);
769 1056
770 // Initialize the audio stream between the client and the device. 1057 // Initialize the audio stream between the client and the device.
771 // For an exclusive-mode stream that uses event-driven buffering, the 1058 // For an exclusive-mode stream that uses event-driven buffering, the
772 // caller must specify nonzero values for hnsPeriodicity and 1059 // caller must specify nonzero values for hnsPeriodicity and
773 // hnsBufferDuration, and the values of these two parameters must be equal. 1060 // hnsBufferDuration, and the values of these two parameters must be equal.
774 // The Initialize method allocates two buffers for the stream. Each buffer 1061 // The Initialize method allocates two buffers for the stream. Each buffer
775 // is equal in duration to the value of the hnsBufferDuration parameter. 1062 // is equal in duration to the value of the hnsBufferDuration parameter.
776 // Following the Initialize call for a rendering stream, the caller should 1063 // Following the Initialize call for a rendering stream, the caller should
777 // fill the first of the two buffers before starting the stream. 1064 // fill the first of the two buffers before starting the stream.
778 HRESULT hr = audio_client_->Initialize(AUDCLNT_SHAREMODE_EXCLUSIVE, 1065 HRESULT hr = S_FALSE;
779 AUDCLNT_STREAMFLAGS_EVENTCALLBACK | 1066 hr = audio_client_->Initialize(AUDCLNT_SHAREMODE_EXCLUSIVE,
780 AUDCLNT_STREAMFLAGS_NOPERSIST, 1067 AUDCLNT_STREAMFLAGS_EVENTCALLBACK |
781 requested_buffer_duration, 1068 AUDCLNT_STREAMFLAGS_NOPERSIST,
782 requested_buffer_duration, 1069 requested_buffer_duration,
783 &format_, 1070 requested_buffer_duration,
784 NULL); 1071 reinterpret_cast<WAVEFORMATEX*>(&format_),
1072 NULL);
785 if (FAILED(hr)) { 1073 if (FAILED(hr)) {
786 if (hr == AUDCLNT_E_BUFFER_SIZE_NOT_ALIGNED) { 1074 if (hr == AUDCLNT_E_BUFFER_SIZE_NOT_ALIGNED) {
787 LOG(ERROR) << "AUDCLNT_E_BUFFER_SIZE_NOT_ALIGNED"; 1075 LOG(ERROR) << "AUDCLNT_E_BUFFER_SIZE_NOT_ALIGNED";
788 1076
789 UINT32 aligned_buffer_size = 0; 1077 UINT32 aligned_buffer_size = 0;
790 audio_client_->GetBufferSize(&aligned_buffer_size); 1078 audio_client_->GetBufferSize(&aligned_buffer_size);
791 DVLOG(1) << "Use aligned buffer size instead: " << aligned_buffer_size; 1079 DVLOG(1) << "Use aligned buffer size instead: " << aligned_buffer_size;
792 audio_client_.Release(); 1080 audio_client_.Release();
793 1081
794 // Calculate new aligned periodicity. Each unit of reference time 1082 // Calculate new aligned periodicity. Each unit of reference time
795 // is 100 nanoseconds. 1083 // is 100 nanoseconds.
796 REFERENCE_TIME aligned_buffer_duration = static_cast<REFERENCE_TIME>( 1084 REFERENCE_TIME aligned_buffer_duration = static_cast<REFERENCE_TIME>(
797 (10000000.0 * aligned_buffer_size / format_.nSamplesPerSec) + 0.5); 1085 (10000000.0 * aligned_buffer_size / format_.Format.nSamplesPerSec)
1086 + 0.5);
798 1087
799 // It is possible to re-activate and re-initialize the audio client 1088 // It is possible to re-activate and re-initialize the audio client
800 // at this stage but we bail out with an error code instead and 1089 // at this stage but we bail out with an error code instead and
801 // combine it with a log message which informs about the suggested 1090 // combine it with a log message which informs about the suggested
802 // aligned buffer size which should be used instead. 1091 // aligned buffer size which should be used instead.
803 DVLOG(1) << "aligned_buffer_duration: " 1092 DVLOG(1) << "aligned_buffer_duration: "
804 << static_cast<double>(aligned_buffer_duration / 10000.0) 1093 << static_cast<double>(aligned_buffer_duration / 10000.0)
805 << " [ms]"; 1094 << " [ms]";
806 } else if (hr == AUDCLNT_E_INVALID_DEVICE_PERIOD) { 1095 } else if (hr == AUDCLNT_E_INVALID_DEVICE_PERIOD) {
807 // We will get this error if we try to use a smaller buffer size than 1096 // We will get this error if we try to use a smaller buffer size than
(...skipping 19 matching lines...)
827 NOTREACHED() << "IMMNotificationClient should not use this method."; 1116 NOTREACHED() << "IMMNotificationClient should not use this method.";
828 if (iid == IID_IUnknown || iid == __uuidof(IMMNotificationClient)) { 1117 if (iid == IID_IUnknown || iid == __uuidof(IMMNotificationClient)) {
829 *object = static_cast<IMMNotificationClient*>(this); 1118
830 } else { 1119 } else {
831 return E_NOINTERFACE; 1120 return E_NOINTERFACE;
832 } 1121 }
833 return S_OK; 1122 return S_OK;
834 } 1123 }
835 1124
836 STDMETHODIMP WASAPIAudioOutputStream::OnDeviceStateChanged(LPCWSTR device_id, 1125 STDMETHODIMP WASAPIAudioOutputStream::OnDeviceStateChanged(LPCWSTR device_id,
837 DWORD new_state) { 1126 DWORD new_state) {
scherkus (not reviewing) 2012/08/02 17:14:14 nit: this should be aligned at the ( if it doesn'
henrika (OOO until Aug 14) 2012/08/03 14:55:56 Done.
838 #ifndef NDEBUG 1127 #ifndef NDEBUG
839 std::string device_name = GetDeviceName(device_id); 1128 std::string device_name = GetDeviceName(device_id);
840 std::string device_state; 1129 std::string device_state;
841 1130
842 switch (new_state) { 1131 switch (new_state) {
843 case DEVICE_STATE_ACTIVE: 1132 case DEVICE_STATE_ACTIVE:
844 device_state = "ACTIVE"; 1133 device_state = "ACTIVE";
845 break; 1134 break;
846 case DEVICE_STATE_DISABLED: 1135 case DEVICE_STATE_DISABLED:
847 device_state = "DISABLED"; 1136 device_state = "DISABLED";
(...skipping 123 matching lines...)
971 // are now re-initiated and it is now possible to re-start audio rendering. 1260 // are now re-initiated and it is now possible to re-start audio rendering.
972 1261
973 // Start rendering again using the new default audio endpoint. 1262 // Start rendering again using the new default audio endpoint.
974 hr = audio_client_->Start(); 1263 hr = audio_client_->Start();
975 1264
976 restart_rendering_mode_ = false; 1265 restart_rendering_mode_ = false;
977 return SUCCEEDED(hr); 1266 return SUCCEEDED(hr);
978 } 1267 }
979 1268
980 } // namespace media 1269 } // namespace media