Chromium Code Reviews

Unified Diff: media/audio/win/audio_low_latency_output_win.cc

Issue 10823100: Adds support for multi-channel output audio for the low-latency path in Windows. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Fixed nit (created 8 years, 4 months ago)
 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "media/audio/win/audio_low_latency_output_win.h"

 #include <Functiondiscoverykeys_devpkey.h>

 #include "base/command_line.h"
 #include "base/logging.h"
 #include "base/memory/scoped_ptr.h"
 #include "base/utf_string_conversions.h"
 #include "media/audio/audio_util.h"
 #include "media/audio/win/audio_manager_win.h"
 #include "media/audio/win/avrt_wrapper_win.h"
 #include "media/base/media_switches.h"

 using base::win::ScopedComPtr;
 using base::win::ScopedCOMInitializer;
 using base::win::ScopedCoMem;

 namespace media {

+typedef uint32 ChannelConfig;
+
+// Ensure that the alignment of members will be on a boundary that is a
+// multiple of 1 byte.
+#pragma pack(push)
+#pragma pack(1)
+
+struct LayoutMono_16bit {
+  int16 center;
+};
+
+struct LayoutStereo_16bit {
+  int16 left;
+  int16 right;
+};
+
+struct Layout5_1_16bit {
+  int16 front_left;
+  int16 front_right;
+  int16 front_center;
+  int16 low_frequency;
+  int16 back_left;
+  int16 back_right;
+};
+
+struct Layout7_1_16bit {
+  int16 front_left;
+  int16 front_right;
+  int16 front_center;
+  int16 low_frequency;
+  int16 back_left;
+  int16 back_right;
+  int16 side_left;
+  int16 side_right;
+};
+
+#pragma pack(pop)
+
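The 1-byte packing matters because ChannelUpMix() below overlays these structs directly onto interleaved PCM buffers. A minimal standalone sketch (not part of the patch; it uses standard <cstdint> types instead of Chromium's int16) of how the no-padding assumption can be checked at compile time:

#include <cstdint>

#pragma pack(push, 1)
struct Layout5_1_16bit {
  int16_t front_left, front_right, front_center;
  int16_t low_frequency, back_left, back_right;
};
#pragma pack(pop)

// With 1-byte packing, six 16-bit samples occupy exactly 12 bytes, so a
// Layout5_1_16bit* can safely walk an interleaved 5.1 sample buffer.
static_assert(sizeof(Layout5_1_16bit) == 6 * sizeof(int16_t),
              "unexpected padding in packed layout struct");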
+// Retrieves the stream format that the audio engine uses for its internal
+// processing/mixing of shared-mode streams.
+static HRESULT GetMixFormat(ERole device_role, WAVEFORMATEX** device_format) {
+  // Note that we are using the IAudioClient::GetMixFormat() API to get the
+  // device format in this function. It is in fact possible to be "more
+  // native", and ask the endpoint device directly for its properties. Given a
+  // reference to the IMMDevice interface of an endpoint object, a client can
+  // obtain a reference to the endpoint object's property store by calling the
+  // IMMDevice::OpenPropertyStore() method. However, I have not been able to
+  // access any valuable information using this method on my HP Z600 desktop,
+  // hence it feels more appropriate to use the IAudioClient::GetMixFormat()
+  // approach instead.
+
+  // Calling this function only makes sense for shared mode streams, since
+  // if the device will be opened in exclusive mode, then the application
+  // specified format is used instead. However, the result of this method can
+  // be useful for testing purposes so we don't DCHECK here.
+  DLOG_IF(WARNING, WASAPIAudioOutputStream::GetShareMode() ==
+          AUDCLNT_SHAREMODE_EXCLUSIVE) <<
+      "The mixing sample rate will be ignored for exclusive-mode streams.";
+
+  // It is assumed that this static method is called from a COM thread, i.e.,
+  // CoInitializeEx() is not called here again to avoid STA/MTA conflicts.
+  ScopedComPtr<IMMDeviceEnumerator> enumerator;
+  HRESULT hr = CoCreateInstance(__uuidof(MMDeviceEnumerator),
+                                NULL,
+                                CLSCTX_INPROC_SERVER,
+                                __uuidof(IMMDeviceEnumerator),
+                                enumerator.ReceiveVoid());
+  if (FAILED(hr)) {
+    NOTREACHED() << "error code: " << std::hex << hr;
+    return 0.0;
henrika (OOO until Aug 14) 2012/08/03 19:56:29 BUG: We should return hr here and not 0.0.
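A sketch of the one-line fix the reviewer is asking for (the same change applies to the identical early return below):

  if (FAILED(hr)) {
    NOTREACHED() << "error code: " << std::hex << hr;
    return hr;  // Propagate the failing HRESULT; 0.0 converts to S_OK.
  }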
+  }
+
+  ScopedComPtr<IMMDevice> endpoint_device;
+  hr = enumerator->GetDefaultAudioEndpoint(eRender,
+                                           device_role,
+                                           endpoint_device.Receive());
+  if (FAILED(hr)) {
+    // This will happen if there's no audio output device found or available
+    // (e.g. some audio cards that have outputs will still report them as
+    // "not found" when no speaker is plugged into the output jack).
+    LOG(WARNING) << "No audio end point: " << std::hex << hr;
+    return 0.0;
henrika (OOO until Aug 14) 2012/08/03 19:56:29 ditto
+  }
+
+  ScopedComPtr<IAudioClient> audio_client;
+  hr = endpoint_device->Activate(__uuidof(IAudioClient),
+                                 CLSCTX_INPROC_SERVER,
+                                 NULL,
+                                 audio_client.ReceiveVoid());
+  DCHECK(SUCCEEDED(hr)) << "Failed to activate device: " << std::hex << hr;
+  if (SUCCEEDED(hr)) {
+    hr = audio_client->GetMixFormat(device_format);
+    DCHECK(SUCCEEDED(hr)) << "GetMixFormat: " << std::hex << hr;
+  }
+
+  return hr;
+}
+
+// Retrieves an integer mask which corresponds to the channel layout the
+// audio engine uses for its internal processing/mixing of shared-mode
+// streams. This mask indicates which channels are present in the multi-
+// channel stream. The least significant bit corresponds with the Front Left
+// speaker, the next least significant bit corresponds to the Front Right
+// speaker, and so on, continuing in the order defined in KsMedia.h.
+// See http://msdn.microsoft.com/en-us/library/windows/hardware/ff537083(v=vs.85).aspx
+// for more details.
+static ChannelConfig GetChannelConfig() {
+  // Use a WAVEFORMATEXTENSIBLE structure since it can specify both the
+  // number of channels and the mapping of channels to speakers for
+  // multichannel devices.
+  base::win::ScopedCoMem<WAVEFORMATPCMEX> format_ex;
+  HRESULT hr = S_FALSE;
+  hr = GetMixFormat(eConsole, reinterpret_cast<WAVEFORMATEX**>(&format_ex));
+  if (FAILED(hr))
+    return 0;
+
+  // The dwChannelMask member specifies which channels are present in the
+  // multichannel stream. The least significant bit corresponds to the
+  // front left speaker, the next least significant bit corresponds to the
+  // front right speaker, and so on.
+  // See http://msdn.microsoft.com/en-us/library/windows/desktop/dd757714(v=vs.85).aspx
+  // for more details on the channel mapping.
+  DVLOG(2) << "dwChannelMask: 0x" << std::hex << format_ex->dwChannelMask;
+
+#if !defined(NDEBUG)
+  // See http://en.wikipedia.org/wiki/Surround_sound for more details on
+  // how to name various speaker configurations. The list below is not
+  // complete.
+  const char* speaker_config = "Undefined";
+  switch (format_ex->dwChannelMask) {
+    case KSAUDIO_SPEAKER_MONO:
+      speaker_config = "Mono";
+      break;
+    case KSAUDIO_SPEAKER_STEREO:
+      speaker_config = "Stereo";
+      break;
+    case KSAUDIO_SPEAKER_5POINT1_SURROUND:
+      speaker_config = "5.1 surround";
+      break;
+    case KSAUDIO_SPEAKER_5POINT1:
+      speaker_config = "5.1";
+      break;
+    case KSAUDIO_SPEAKER_7POINT1_SURROUND:
+      speaker_config = "7.1 surround";
+      break;
+    case KSAUDIO_SPEAKER_7POINT1:
+      speaker_config = "7.1";
+      break;
+    default:
+      break;
+  }
+  DVLOG(2) << "speaker configuration: " << speaker_config;
+#endif
+
+  return static_cast<ChannelConfig>(format_ex->dwChannelMask);
+}
+
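Since each set bit in the returned mask names one speaker, the channel count can be recovered from the config alone. A small standalone sketch (plain C++, not taken from the patch):

#include <cstdint>

// Counts the speakers present in a WASAPI channel mask using Kernighan's
// trick: each iteration clears the lowest set bit.
int ChannelCountFromConfig(uint32_t channel_mask) {
  int count = 0;
  for (uint32_t bits = channel_mask; bits != 0; bits &= bits - 1)
    ++count;
  return count;
}

// Example: KSAUDIO_SPEAKER_STEREO is 0x3 (front left | front right),
// so ChannelCountFromConfig(0x3) returns 2.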
+// Converts Microsoft's channel configuration to ChannelLayout.
+// This mapping is not perfect but the best we can do given the current
+// ChannelLayout enumerator and the Windows-specific speaker configurations
+// defined in ksmedia.h. Don't assume that the channel ordering in
+// ChannelLayout is exactly the same as the Windows specific configuration.
+// As an example: KSAUDIO_SPEAKER_7POINT1_SURROUND is mapped to
+// CHANNEL_LAYOUT_7_1 but the positions of Back L, Back R and Side L, Side R
+// speakers are different in these two definitions.
+static ChannelLayout ChannelConfigToChannelLayout(ChannelConfig config) {
+  switch (config) {
+    case KSAUDIO_SPEAKER_DIRECTOUT:
+      return CHANNEL_LAYOUT_NONE;
+    case KSAUDIO_SPEAKER_MONO:
+      return CHANNEL_LAYOUT_MONO;
+    case KSAUDIO_SPEAKER_STEREO:
+      return CHANNEL_LAYOUT_STEREO;
+    case KSAUDIO_SPEAKER_QUAD:
+      return CHANNEL_LAYOUT_QUAD;
+    case KSAUDIO_SPEAKER_SURROUND:
+      return CHANNEL_LAYOUT_4_0;
+    case KSAUDIO_SPEAKER_5POINT1:
+      return CHANNEL_LAYOUT_5_1_BACK;
+    case KSAUDIO_SPEAKER_5POINT1_SURROUND:
+      return CHANNEL_LAYOUT_5_1;
+    case KSAUDIO_SPEAKER_7POINT1:
+      return CHANNEL_LAYOUT_7_1_WIDE;
+    case KSAUDIO_SPEAKER_7POINT1_SURROUND:
+      return CHANNEL_LAYOUT_7_1;
+    default:
+      DVLOG(1) << "Unsupported channel layout: " << config;
+      return CHANNEL_LAYOUT_UNSUPPORTED;
+  }
+}
+
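For reference, the ksmedia.h constants being matched here are themselves just OR-ed speaker bits; to my recollection (worth verifying against your SDK's ksmedia.h) the two 5.1 variants differ only in whether the rear pair is "back" or "side":

// 5.1 with back speakers (KSAUDIO_SPEAKER_5POINT1):
//   FRONT_LEFT | FRONT_RIGHT | FRONT_CENTER | LOW_FREQUENCY |
//   BACK_LEFT | BACK_RIGHT                                   = 0x3F
// 5.1 with side speakers (KSAUDIO_SPEAKER_5POINT1_SURROUND):
//   FRONT_LEFT | FRONT_RIGHT | FRONT_CENTER | LOW_FREQUENCY |
//   SIDE_LEFT | SIDE_RIGHT                                   = 0x60F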
+// mono/stereo -> N.1 up-mixing where N=out_channels-1.
+// See http://www.w3.org/TR/webaudio/#UpMix-sub for details.
+// TODO(henrika): try to reduce the size of this function.
+// TODO(henrika): use ChannelLayout for channel parameters.
+// TODO(henrika): can we do this in-place by processing the samples in
+// reverse order when sizeof(out) > sizeof(in) (upmixing)?
+// TODO(henrika): add support for other bit-depths as well?
+static int ChannelUpMix(void* input,
+                        void* output,
+                        int in_channels,
+                        int out_channels,
+                        size_t number_of_input_bytes,
+                        int bytes_per_sample) {
+  DCHECK_GT(out_channels, in_channels);
+  DCHECK_EQ(bytes_per_sample, 2);
+
+  if (bytes_per_sample != 2) {
+    LOG(ERROR) << "Only 16-bit samples are supported.";
+    return 0;
+  }
+
+  const int kChannelRatio = out_channels / in_channels;
+
+  // 1 -> 2
+  if (in_channels == 1 && out_channels == 2) {
+    LayoutMono_16bit* in = reinterpret_cast<LayoutMono_16bit*>(input);
+    LayoutStereo_16bit* out = reinterpret_cast<LayoutStereo_16bit*>(output);
+    int number_of_input_mono_samples = (number_of_input_bytes >> 1);
+
+    // Copy same input mono sample to both output channels.
+    for (int i = 0; i < number_of_input_mono_samples; ++i) {
+      out->left = in->center;
+      out->right = in->center;
+      in++;
+      out++;
+    }
+
+    return (kChannelRatio * number_of_input_bytes);
+  }
+
+  // 1 -> 7.1
+  if (in_channels == 1 && out_channels == 8) {
+    LayoutMono_16bit* in = reinterpret_cast<LayoutMono_16bit*>(input);
+    Layout7_1_16bit* out = reinterpret_cast<Layout7_1_16bit*>(output);
+    int number_of_input_mono_samples = (number_of_input_bytes >> 1);
+
+    // Zero out all frames first.
+    memset(out, 0, number_of_input_mono_samples * sizeof(out[0]));
+
+    // Copy input sample to output center channel.
+    for (int i = 0; i < number_of_input_mono_samples; ++i) {
+      out->front_center = in->center;
+      in++;
+      out++;
+    }
+
+    return (kChannelRatio * number_of_input_bytes);
+  }
+
+  // 2 -> 5.1
+  if (in_channels == 2 && out_channels == 6) {
+    LayoutStereo_16bit* in = reinterpret_cast<LayoutStereo_16bit*>(input);
+    Layout5_1_16bit* out = reinterpret_cast<Layout5_1_16bit*>(output);
+    int number_of_input_stereo_samples = (number_of_input_bytes >> 2);
+
+    // Zero out all frames first.
+    memset(out, 0, number_of_input_stereo_samples * sizeof(out[0]));
+
+    // Copy left and right input channels to the same output channels.
+    for (int i = 0; i < number_of_input_stereo_samples; ++i) {
+      out->front_left = in->left;
+      out->front_right = in->right;
+      in++;
+      out++;
+    }
+
+    return (kChannelRatio * number_of_input_bytes);
+  }
+
+  // 2 -> 7.1
+  if (in_channels == 2 && out_channels == 8) {
+    LayoutStereo_16bit* in = reinterpret_cast<LayoutStereo_16bit*>(input);
+    Layout7_1_16bit* out = reinterpret_cast<Layout7_1_16bit*>(output);
+    int number_of_input_stereo_samples = (number_of_input_bytes >> 2);
+
+    // Zero out all frames first.
+    memset(out, 0, number_of_input_stereo_samples * sizeof(out[0]));
+
+    // Copy left and right input channels to the same output channels.
+    for (int i = 0; i < number_of_input_stereo_samples; ++i) {
+      out->front_left = in->left;
+      out->front_right = in->right;
+      in++;
+      out++;
+    }
+
+    return (kChannelRatio * number_of_input_bytes);
+  }
+
+  LOG(ERROR) << "Up-mixing " << in_channels << "->"
+             << out_channels << " is not supported.";
+  return 0;
+}
+
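A worked example of the byte accounting, as a hypothetical call (sizes chosen for illustration only): 480 stereo frames of 16-bit PCM occupy 1920 bytes; up-mixed to 5.1 the function writes 480 frames of 6 channels and reports kChannelRatio times the input size:

// Hypothetical usage sketch, not part of the patch.
const int kFrames = 480;
int16 stereo_in[kFrames * 2] = {0};  // 1920 bytes of interleaved L/R.
int16 surround_out[kFrames * 6];     // Destination for 5.1 (6 channels).
int out_bytes = ChannelUpMix(stereo_in, surround_out,
                             2, 6,                // in/out channel counts.
                             sizeof(stereo_in),   // 1920 input bytes.
                             2);                  // 16-bit samples.
// out_bytes == (6 / 2) * 1920 == 5760, i.e. 480 frames * 6 ch * 2 bytes.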
 // static
 AUDCLNT_SHAREMODE WASAPIAudioOutputStream::GetShareMode() {
   const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
   if (cmd_line->HasSwitch(switches::kEnableExclusiveAudio))
     return AUDCLNT_SHAREMODE_EXCLUSIVE;
   return AUDCLNT_SHAREMODE_SHARED;
 }

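GetShareMode() is driven purely by a command-line switch; assuming kEnableExclusiveAudio maps to its usual string form (worth double-checking in media_switches.cc), exclusive mode is selected by launching the browser as:

chrome.exe --enable-exclusive-audio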
 WASAPIAudioOutputStream::WASAPIAudioOutputStream(AudioManagerWin* manager,
                                                  const AudioParameters& params,
                                                  ERole device_role)
     : com_init_(ScopedCOMInitializer::kMTA),
       creating_thread_id_(base::PlatformThread::CurrentId()),
       manager_(manager),
       render_thread_(NULL),
       opened_(false),
       started_(false),
       restart_rendering_mode_(false),
       volume_(1.0),
       endpoint_buffer_size_frames_(0),
       device_role_(device_role),
       share_mode_(GetShareMode()),
+      client_channel_count_(params.channels()),
       num_written_frames_(0),
       source_(NULL) {
   CHECK(com_init_.succeeded());
   DCHECK(manager_);

   // Load the Avrt DLL if not already loaded. Required to support MMCSS.
   bool avrt_init = avrt::Initialize();
   DCHECK(avrt_init) << "Failed to load the avrt.dll";

-  if (share_mode() == AUDCLNT_SHAREMODE_EXCLUSIVE) {
+  if (share_mode_ == AUDCLNT_SHAREMODE_EXCLUSIVE) {
     VLOG(1) << ">> Note that EXCLUSIVE MODE is enabled <<";
   }

-  // Set up the desired render format specified by the client.
-  format_.nSamplesPerSec = params.sample_rate();
-  format_.wFormatTag = WAVE_FORMAT_PCM;
-  format_.wBitsPerSample = params.bits_per_sample();
-  format_.nChannels = params.channels();
-  format_.nBlockAlign = (format_.wBitsPerSample / 8) * format_.nChannels;
-  format_.nAvgBytesPerSec = format_.nSamplesPerSec * format_.nBlockAlign;
-  format_.cbSize = 0;
+  // Set up the desired render format specified by the client. We use the
+  // WAVE_FORMAT_EXTENSIBLE structure to ensure that multiple channel ordering
+  // and high precision data can be supported.
+
+  // Begin with the WAVEFORMATEX structure that specifies the basic format.
+  WAVEFORMATEX* format = &format_.Format;
+  format->wFormatTag = WAVE_FORMAT_EXTENSIBLE;
+  format->nChannels = HardwareChannelCount();
+  format->nSamplesPerSec = params.sample_rate();
+  format->wBitsPerSample = params.bits_per_sample();
+  format->nBlockAlign = (format->wBitsPerSample / 8) * format->nChannels;
+  format->nAvgBytesPerSec = format->nSamplesPerSec * format->nBlockAlign;
+  format->cbSize = sizeof(WAVEFORMATEXTENSIBLE) - sizeof(WAVEFORMATEX);
+
+  // Add the parts which are unique to WAVE_FORMAT_EXTENSIBLE.
+  format_.Samples.wValidBitsPerSample = params.bits_per_sample();
+  format_.dwChannelMask = GetChannelConfig();
+  format_.SubFormat = KSDATAFORMAT_SUBTYPE_PCM;

   // Size in bytes of each audio frame.
-  frame_size_ = format_.nBlockAlign;
+  frame_size_ = format->nBlockAlign;
+
+  // It is possible to set the number of channels in |params| to a lower value
+  // than we use as the internal number of audio channels when the audio
+  // stream is opened. If this mode (channel_factor() > 1) is set, the native
+  // audio layer will expect a larger number of channels in the interleaved
+  // audio stream and a channel up-mix will be performed after the
+  // OnMoreData() callback to compensate for the lower number of channels
+  // provided by the audio source.
+  // Example: params.channels() is 2 and endpoint_channel_count() is 8 =>
+  // the audio stream is opened up in 7.1 surround mode but the source only
+  // provides a stereo signal as input, i.e., a stereo up-mix (2 -> 7.1) will
+  // take place before sending the stream to the audio driver.
+  DVLOG(1) << "Channel mixing " << client_channel_count_ << "->"
+           << endpoint_channel_count() << " is requested.";
+  LOG_IF(ERROR, channel_factor() < 1)
+      << "Channel mixing " << client_channel_count_ << "->"
+      << endpoint_channel_count() << " is not supported.";

   // Store size (in different units) of audio packets which we expect to
   // get from the audio endpoint device in each render event.
-  packet_size_frames_ = params.GetBytesPerBuffer() / format_.nBlockAlign;
-  packet_size_bytes_ = params.GetBytesPerBuffer();
+  packet_size_frames_ =
+      (channel_factor() * params.GetBytesPerBuffer()) / format->nBlockAlign;
+  packet_size_bytes_ = channel_factor() * params.GetBytesPerBuffer();
   packet_size_ms_ = (1000.0 * packet_size_frames_) / params.sample_rate();
   DVLOG(1) << "Number of bytes per audio frame  : " << frame_size_;
   DVLOG(1) << "Number of audio frames per packet: " << packet_size_frames_;
   DVLOG(1) << "Number of milliseconds per packet: " << packet_size_ms_;

   // All events are auto-reset events and non-signaled initially.

   // Create the event which the audio engine will signal each time
   // a buffer becomes ready to be processed by the client.
   audio_samples_render_event_.Set(CreateEvent(NULL, FALSE, FALSE, NULL));
   DCHECK(audio_samples_render_event_.IsValid());

   // Create the event which will be set in Stop() when capturing shall stop.
   stop_render_event_.Set(CreateEvent(NULL, FALSE, FALSE, NULL));
   DCHECK(stop_render_event_.IsValid());

   // Create the event which will be set when a stream switch shall take place.
   stream_switch_event_.Set(CreateEvent(NULL, FALSE, FALSE, NULL));
   DCHECK(stream_switch_event_.IsValid());
 }
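A concrete instance of this bookkeeping, assuming channel_factor() is defined as endpoint_channel_count() / client_channel_count_ (that definition lives in the header, so treat it as an assumption here): a stereo, 16-bit, 48 kHz source with a 480-frame buffer on an 8-channel endpoint gives:

// Illustrative arithmetic only (hypothetical values).
// params: 2 channels, 16 bit, 480 frames => GetBytesPerBuffer() = 1920.
// endpoint_channel_count() = 8        => channel_factor() = 8 / 2 = 4.
// format->nBlockAlign  = (16 / 8) * 8        = 16 bytes per frame.
// packet_size_bytes_   = 4 * 1920            = 7680 bytes.
// packet_size_frames_  = 7680 / 16           = 480 frames (unchanged).
// packet_size_ms_      = 1000 * 480 / 48000  = 10 ms.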

 WASAPIAudioOutputStream::~WASAPIAudioOutputStream() {}

 bool WASAPIAudioOutputStream::Open() {
   DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_);
   if (opened_)
     return true;

+  // Down-mixing is currently not supported. The number of channels provided
+  // by the audio source must be less than or equal to the number of native
+  // channels (given by endpoint_channel_count()) which is the channel count
+  // used when opening the default endpoint device.
+  if (channel_factor() < 1) {
+    LOG(ERROR) << "Channel down-mixing is not supported";
+    return false;
+  }
+
+  // Only 16-bit audio is supported in combination with channel up-mixing.
+  if (channel_factor() > 1 && (format_.Format.wBitsPerSample != 16)) {
+    LOG(ERROR) << "16-bit audio is required when channel up-mixing is active.";
+    return false;
+  }
+
   // Create an IMMDeviceEnumerator interface and obtain a reference to
   // the IMMDevice interface of the default rendering device with the
   // specified role.
   HRESULT hr = SetRenderDevice();
   if (FAILED(hr)) {
     return false;
   }

   // Obtain an IAudioClient interface which enables us to create and initialize
   // an audio stream between an audio application and the audio engine.
(...skipping 125 matching lines...)
   hr = audio_client_->Reset();
   if (FAILED(hr)) {
     DLOG_IF(ERROR, hr != AUDCLNT_E_NOT_INITIALIZED)
         << "Failed to reset streaming: " << std::hex << hr;
   }

   // Extra safety check to ensure that the buffers are cleared.
   // If the buffers are not cleared correctly, the next call to Start()
   // would fail with AUDCLNT_E_BUFFER_ERROR at IAudioRenderClient::GetBuffer().
   // This check is only needed for shared-mode streams.
-  if (share_mode() == AUDCLNT_SHAREMODE_SHARED) {
+  if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) {
     UINT32 num_queued_frames = 0;
     audio_client_->GetCurrentPadding(&num_queued_frames);
     DCHECK_EQ(0u, num_queued_frames);
   }

   // Ensure that we don't quit the main thread loop immediately next
   // time Start() is called.
   ResetEvent(stop_render_event_.Get());

   started_ = false;
(...skipping 27 matching lines...)
   }
   volume_ = volume_float;
 }

 void WASAPIAudioOutputStream::GetVolume(double* volume) {
   DVLOG(1) << "GetVolume()";
   *volume = static_cast<double>(volume_);
 }

 // static
+int WASAPIAudioOutputStream::HardwareChannelCount() {
+  // Use a WAVEFORMATEXTENSIBLE structure since it can specify both the
+  // number of channels and the mapping of channels to speakers for
+  // multichannel devices.
+  base::win::ScopedCoMem<WAVEFORMATPCMEX> format_ex;
+  HRESULT hr = GetMixFormat(
+      eConsole, reinterpret_cast<WAVEFORMATEX**>(&format_ex));
+  if (FAILED(hr))
+    return 0;
+
+  // Number of channels in the stream. Corresponds to the number of bits
+  // set in the dwChannelMask.
+  DVLOG(1) << "endpoint channels (out): " << format_ex->Format.nChannels;
+
+  return static_cast<int>(format_ex->Format.nChannels);
+}
+
+// static
+ChannelLayout WASAPIAudioOutputStream::HardwareChannelLayout() {
+  return ChannelConfigToChannelLayout(GetChannelConfig());
+}
+
+// static
 int WASAPIAudioOutputStream::HardwareSampleRate(ERole device_role) {
-  // Calling this function only makes sense for shared mode streams, since
-  // if the device will be opened in exclusive mode, then the application
-  // specified format is used instead. However, the result of this method can
-  // be useful for testing purposes so we don't DCHECK here.
-  DLOG_IF(WARNING, GetShareMode() == AUDCLNT_SHAREMODE_EXCLUSIVE) <<
-      "The mixing sample rate will be ignored for exclusive-mode streams.";
-
-  // It is assumed that this static method is called from a COM thread, i.e.,
-  // CoInitializeEx() is not called here again to avoid STA/MTA conflicts.
-  ScopedComPtr<IMMDeviceEnumerator> enumerator;
-  HRESULT hr = CoCreateInstance(__uuidof(MMDeviceEnumerator),
-                                NULL,
-                                CLSCTX_INPROC_SERVER,
-                                __uuidof(IMMDeviceEnumerator),
-                                enumerator.ReceiveVoid());
-  if (FAILED(hr)) {
-    NOTREACHED() << "error code: " << std::hex << hr;
-    return 0.0;
-  }
-
-  ScopedComPtr<IMMDevice> endpoint_device;
-  hr = enumerator->GetDefaultAudioEndpoint(eRender,
-                                           device_role,
-                                           endpoint_device.Receive());
-  if (FAILED(hr)) {
-    // This will happen if there's no audio output device found or available
-    // (e.g. some audio cards that have outputs will still report them as
-    // "not found" when no speaker is plugged into the output jack).
-    LOG(WARNING) << "No audio end point: " << std::hex << hr;
-    return 0.0;
-  }
-
-  ScopedComPtr<IAudioClient> audio_client;
-  hr = endpoint_device->Activate(__uuidof(IAudioClient),
-                                 CLSCTX_INPROC_SERVER,
-                                 NULL,
-                                 audio_client.ReceiveVoid());
-  if (FAILED(hr)) {
-    NOTREACHED() << "error code: " << std::hex << hr;
-    return 0.0;
-  }
-
-  // Retrieve the stream format that the audio engine uses for its internal
-  // processing of shared-mode streams.
-  base::win::ScopedCoMem<WAVEFORMATEX> audio_engine_mix_format;
-  hr = audio_client->GetMixFormat(&audio_engine_mix_format);
-  if (FAILED(hr)) {
-    NOTREACHED() << "error code: " << std::hex << hr;
-    return 0.0;
-  }
-
-  return static_cast<int>(audio_engine_mix_format->nSamplesPerSec);
+  base::win::ScopedCoMem<WAVEFORMATEX> format;
+  HRESULT hr = GetMixFormat(device_role, &format);
+  if (FAILED(hr))
+    return 0;
+
+  DVLOG(2) << "nSamplesPerSec: " << format->nSamplesPerSec;
+  return static_cast<int>(format->nSamplesPerSec);
 }

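Taken together, the three static probes expose the endpoint's native shared-mode format. A hypothetical caller (assuming, per the comments above, that COM is already initialized on the calling thread):

// Sketch of querying the shared-mode mix format; not from the patch.
int sample_rate = WASAPIAudioOutputStream::HardwareSampleRate(eConsole);
int channels = WASAPIAudioOutputStream::HardwareChannelCount();
ChannelLayout layout = WASAPIAudioOutputStream::HardwareChannelLayout();
// Typical desktop result: 48000 Hz, 2 channels, CHANNEL_LAYOUT_STEREO.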
 void WASAPIAudioOutputStream::Run() {
   ScopedCOMInitializer com_init(ScopedCOMInitializer::kMTA);

   // Increase the thread priority.
   render_thread_->SetThreadPriority(base::kThreadPriority_RealtimeAudio);

   // Enable MMCSS to ensure that this thread receives prioritized access to
   // CPU resources.
(...skipping 60 matching lines...)
       {
         // |audio_samples_render_event_| has been set.
         UINT32 num_queued_frames = 0;
         uint8* audio_data = NULL;

         // Contains how much new data we can write to the buffer without
         // the risk of overwriting previously written data that the audio
         // engine has not yet read from the buffer.
         size_t num_available_frames = 0;

-        if (share_mode() == AUDCLNT_SHAREMODE_SHARED) {
+        if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) {
           // Get the padding value which represents the amount of rendering
           // data that is queued up to play in the endpoint buffer.
           hr = audio_client_->GetCurrentPadding(&num_queued_frames);
           num_available_frames =
               endpoint_buffer_size_frames_ - num_queued_frames;
         } else {
           // While the stream is running, the system alternately sends one
           // buffer or the other to the client. This form of double buffering
           // is referred to as "ping-ponging". Each time the client receives
           // a buffer from the system (triggers this event) the client must
(...skipping 31 matching lines...)
           // a render event and the time when the first audio sample in a
           // packet is played out through the speaker. This delay value
           // can typically be utilized by an acoustic echo-control (AEC)
           // unit at the render side.
           UINT64 position = 0;
           int audio_delay_bytes = 0;
           hr = audio_clock->GetPosition(&position, NULL);
           if (SUCCEEDED(hr)) {
             // Stream position of the sample that is currently playing
             // through the speaker.
-            double pos_sample_playing_frames = format_.nSamplesPerSec *
+            double pos_sample_playing_frames = format_.Format.nSamplesPerSec *
                 (static_cast<double>(position) / device_frequency);

             // Stream position of the last sample written to the endpoint
             // buffer. Note that, the packet we are about to receive in
             // the upcoming callback is also included.
             size_t pos_last_sample_written_frames =
                 num_written_frames_ + packet_size_frames_;

             // Derive the actual delay value which will be fed to the
             // render client using the OnMoreData() callback.
             audio_delay_bytes = (pos_last_sample_written_frames -
                 pos_sample_playing_frames) * frame_size_;
           }

           // Read a data packet from the registered client source and
           // deliver a delay estimate in the same callback to the client.
           // A time stamp is also stored in the AudioBuffersState. This
           // time stamp can be used at the client side to compensate for
           // the delay between the usage of the delay value and the time
           // of generation.
-          uint32 num_filled_bytes = source_->OnMoreData(
-              audio_data, packet_size_bytes_,
-              AudioBuffersState(0, audio_delay_bytes));
+
+          uint32 num_filled_bytes = 0;
+          const int bytes_per_sample = format_.Format.wBitsPerSample >> 3;
+
+          if (channel_factor() == 1) {
+            // Case I: no up-mixing.
+            num_filled_bytes = source_->OnMoreData(
+                audio_data, packet_size_bytes_,
+                AudioBuffersState(0, audio_delay_bytes));
+          } else {
+            // Case II: up-mixing.
+            const int audio_source_size_bytes =
+                packet_size_bytes_ / channel_factor();
+            scoped_array<uint8> buffer;
+            buffer.reset(new uint8[audio_source_size_bytes]);
+
+            num_filled_bytes = source_->OnMoreData(
+                buffer.get(), audio_source_size_bytes,
+                AudioBuffersState(0, audio_delay_bytes));
+
+            // Do channel up-mixing on 16-bit PCM samples.
+            num_filled_bytes = ChannelUpMix(buffer.get(),
+                                            &audio_data[0],
+                                            client_channel_count_,
+                                            endpoint_channel_count(),
+                                            num_filled_bytes,
+                                            bytes_per_sample);
+          }

           // Perform in-place, software-volume adjustments.
+          // TODO(henrika): it is possible to adjust the volume in the
+          // ChannelUpMix() function.
           media::AdjustVolume(audio_data,
                               num_filled_bytes,
-                              format_.nChannels,
-                              format_.wBitsPerSample >> 3,
+                              endpoint_channel_count(),
+                              bytes_per_sample,
                               volume_);

           // Zero out the part of the packet which has not been filled by
           // the client. Using silence is the least bad option in this
           // situation.
           if (num_filled_bytes < packet_size_bytes_) {
             memset(&audio_data[num_filled_bytes], 0,
                    (packet_size_bytes_ - num_filled_bytes));
           }

           // Release the buffer space acquired in the GetBuffer() call.
           DWORD flags = 0;
           audio_render_client_->ReleaseBuffer(packet_size_frames_,
                                               flags);

           num_written_frames_ += packet_size_frames_;
         }
       }
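A worked instance of the delay estimate above, with illustrative numbers (not measured values), at 48 kHz with 480-frame packets and a 16-byte frame size:

// Illustrative arithmetic only.
// Suppose GetPosition() reports the engine is playing frame 9600 and we
// have written 10080 frames including the upcoming packet:
//   pos_sample_playing_frames      = 9600
//   pos_last_sample_written_frames = 9600 + 480 = 10080
//   audio_delay_bytes = (10080 - 9600) * 16 = 7680 bytes
//   => 480 frames of delay, i.e. 10 ms at 48 kHz.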
(...skipping 22 matching lines...)
   NOTREACHED() << "Error code: " << std::hex << err;
   if (source_)
     source_->OnError(this, static_cast<int>(err));
 }

 HRESULT WASAPIAudioOutputStream::SetRenderDevice() {
   ScopedComPtr<IMMDeviceEnumerator> device_enumerator;
   ScopedComPtr<IMMDevice> endpoint_device;

   // Create the IMMDeviceEnumerator interface.
   HRESULT hr = CoCreateInstance(__uuidof(MMDeviceEnumerator),
                                 NULL,
                                 CLSCTX_INPROC_SERVER,
                                 __uuidof(IMMDeviceEnumerator),
                                 device_enumerator.ReceiveVoid());
   if (SUCCEEDED(hr)) {
     // Retrieve the default render audio endpoint for the specified role.
     // Note that, in Windows Vista, the MMDevice API supports device roles
     // but the system-supplied user interface programs do not.
     hr = device_enumerator->GetDefaultAudioEndpoint(
         eRender, device_role_, endpoint_device.Receive());
     if (FAILED(hr))
       return hr;

     // Verify that the audio endpoint device is active. That is, the audio
(...skipping 22 matching lines...)
   // Creates and activates an IAudioClient COM object given the selected
   // render endpoint device.
   HRESULT hr = endpoint_device_->Activate(__uuidof(IAudioClient),
                                           CLSCTX_INPROC_SERVER,
                                           NULL,
                                           audio_client.ReceiveVoid());
   if (SUCCEEDED(hr)) {
     // Retrieve the stream format that the audio engine uses for its internal
     // processing/mixing of shared-mode streams.
     audio_engine_mix_format_.Reset(NULL);
-    hr = audio_client->GetMixFormat(&audio_engine_mix_format_);
+    hr = audio_client->GetMixFormat(
+        reinterpret_cast<WAVEFORMATEX**>(&audio_engine_mix_format_));

     if (SUCCEEDED(hr)) {
       audio_client_ = audio_client;
     }
   }

   return hr;
 }

 bool WASAPIAudioOutputStream::DesiredFormatIsSupported() {
   // Determine, before calling IAudioClient::Initialize(), whether the audio
   // engine supports a particular stream format.
   // In shared mode, the audio engine always supports the mix format,
   // which is stored in the |audio_engine_mix_format_| member and it is also
   // possible to receive a proposed (closest) format if the current format is
   // not supported.
-  base::win::ScopedCoMem<WAVEFORMATEX> closest_match;
-  HRESULT hr = audio_client_->IsFormatSupported(share_mode(),
-                                                &format_,
-                                                &closest_match);
+  base::win::ScopedCoMem<WAVEFORMATEXTENSIBLE> closest_match;
+  HRESULT hr = audio_client_->IsFormatSupported(
+      share_mode_, reinterpret_cast<WAVEFORMATEX*>(&format_),
+      reinterpret_cast<WAVEFORMATEX**>(&closest_match));

   // This log can only be triggered for shared mode.
   DLOG_IF(ERROR, hr == S_FALSE) << "Format is not supported "
                                 << "but a closest match exists.";
   // This log can be triggered both for shared and exclusive modes.
   DLOG_IF(ERROR, hr == AUDCLNT_E_UNSUPPORTED_FORMAT) << "Unsupported format.";
   if (hr == S_FALSE) {
-    DVLOG(1) << "wFormatTag    : " << closest_match->wFormatTag;
-    DVLOG(1) << "nChannels     : " << closest_match->nChannels;
-    DVLOG(1) << "nSamplesPerSec: " << closest_match->nSamplesPerSec;
-    DVLOG(1) << "wBitsPerSample: " << closest_match->wBitsPerSample;
+    DVLOG(1) << "wFormatTag    : " << closest_match->Format.wFormatTag;
+    DVLOG(1) << "nChannels     : " << closest_match->Format.nChannels;
+    DVLOG(1) << "nSamplesPerSec: " << closest_match->Format.nSamplesPerSec;
+    DVLOG(1) << "wBitsPerSample: " << closest_match->Format.wBitsPerSample;
   }

   return (hr == S_OK);
 }

 HRESULT WASAPIAudioOutputStream::InitializeAudioEngine() {
 #if !defined(NDEBUG)
   // The period between processing passes by the audio engine is fixed for a
   // particular audio endpoint device and represents the smallest processing
   // quantum for the audio engine. This period plus the stream latency between
(...skipping 21 matching lines...)
       DVLOG(1) << "stream latency: " << static_cast<double>(latency / 10000.0)
                << " [ms]";
     }
   }
 #endif

   HRESULT hr = S_FALSE;

   // Perform different initialization depending on whether the device shall
   // be opened in shared mode or in exclusive mode.
-  hr = (share_mode() == AUDCLNT_SHAREMODE_SHARED) ?
+  hr = (share_mode_ == AUDCLNT_SHAREMODE_SHARED) ?
       SharedModeInitialization() : ExclusiveModeInitialization();
   if (FAILED(hr)) {
     LOG(WARNING) << "IAudioClient::Initialize() failed: " << std::hex << hr;
     return hr;
   }

   // Retrieve the length of the endpoint buffer. The buffer length represents
   // the maximum amount of rendering data that the client can write to
   // the endpoint buffer during a single processing pass.
   // A typical value is 960 audio frames <=> 20ms @ 48kHz sample rate.
   hr = audio_client_->GetBufferSize(&endpoint_buffer_size_frames_);
   if (FAILED(hr))
     return hr;
   DVLOG(1) << "endpoint buffer size: " << endpoint_buffer_size_frames_
            << " [frames]";

   // The buffer scheme for exclusive mode streams is not designed for max
   // flexibility. We only allow a "perfect match" between the packet size set
   // by the user and the actual endpoint buffer size.
-  if (share_mode() == AUDCLNT_SHAREMODE_EXCLUSIVE &&
+  if (share_mode_ == AUDCLNT_SHAREMODE_EXCLUSIVE &&
       endpoint_buffer_size_frames_ != packet_size_frames_) {
     hr = AUDCLNT_E_INVALID_SIZE;
     DLOG(ERROR) << "AUDCLNT_E_INVALID_SIZE";
     return hr;
   }

   // Set the event handle that the audio engine will signal each time
   // a buffer becomes ready to be processed by the client.
   hr = audio_client_->SetEventHandle(audio_samples_render_event_.Get());
   if (FAILED(hr))
     return hr;

   // Get access to the IAudioRenderClient interface. This interface
   // enables us to write output data to a rendering endpoint buffer.
   // The methods in this interface manage the movement of data packets
   // that contain audio-rendering data.
   hr = audio_client_->GetService(__uuidof(IAudioRenderClient),
                                  audio_render_client_.ReceiveVoid());
   return hr;
 }

 HRESULT WASAPIAudioOutputStream::SharedModeInitialization() {
-  DCHECK_EQ(share_mode(), AUDCLNT_SHAREMODE_SHARED);
+  DCHECK_EQ(share_mode_, AUDCLNT_SHAREMODE_SHARED);

   // TODO(henrika): this buffer scheme is still under development.
   // The exact details are yet to be determined based on tests with different
   // audio clients.
   int glitch_free_buffer_size_ms = static_cast<int>(packet_size_ms_ + 0.5);
-  if (audio_engine_mix_format_->nSamplesPerSec == 48000) {
+  if (audio_engine_mix_format_->Format.nSamplesPerSec == 48000) {
     // Initial tests have shown that we have to add 10 ms extra to
     // ensure that we don't run empty for any packet size.
     glitch_free_buffer_size_ms += 10;
-  } else if (audio_engine_mix_format_->nSamplesPerSec == 44100) {
+  } else if (audio_engine_mix_format_->Format.nSamplesPerSec == 44100) {
     // Initial tests have shown that we have to add 20 ms extra to
     // ensure that we don't run empty for any packet size.
     glitch_free_buffer_size_ms += 20;
   } else {
     glitch_free_buffer_size_ms += 20;
   }
   DVLOG(1) << "glitch_free_buffer_size_ms: " << glitch_free_buffer_size_ms;
   REFERENCE_TIME requested_buffer_duration =
       static_cast<REFERENCE_TIME>(glitch_free_buffer_size_ms * 10000);

   // Initialize the audio stream between the client and the device.
   // We connect indirectly through the audio engine by using shared mode
   // and WASAPI is initialized in an event driven mode.
   // Note that this API ensures that the buffer is never smaller than the
   // minimum buffer size needed to ensure glitch-free rendering.
   // If we request a buffer size that is smaller than the audio engine's
   // minimum required buffer size, the method sets the buffer size to this
   // minimum buffer size rather than to the buffer size requested.
-  HRESULT hr = audio_client_->Initialize(AUDCLNT_SHAREMODE_SHARED,
-                                         AUDCLNT_STREAMFLAGS_EVENTCALLBACK |
-                                         AUDCLNT_STREAMFLAGS_NOPERSIST,
-                                         requested_buffer_duration,
-                                         0,
-                                         &format_,
-                                         NULL);
+  HRESULT hr = S_FALSE;
+  hr = audio_client_->Initialize(AUDCLNT_SHAREMODE_SHARED,
+                                 AUDCLNT_STREAMFLAGS_EVENTCALLBACK |
+                                 AUDCLNT_STREAMFLAGS_NOPERSIST,
+                                 requested_buffer_duration,
+                                 0,
+                                 reinterpret_cast<WAVEFORMATEX*>(&format_),
+                                 NULL);
   return hr;
 }

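The REFERENCE_TIME conversion above relies on the unit being 100 ns (as the comment in ExclusiveModeInitialization() below also notes), so milliseconds scale by 10000. Worked numbers, for illustration only:

// Illustrative arithmetic only.
// packet_size_ms_ = 10 and a 48 kHz mix format:
//   glitch_free_buffer_size_ms = 10 + 10 = 20 ms
//   requested_buffer_duration  = 20 * 10000 = 200000 REFERENCE_TIME units
//                              = 200000 * 100 ns = 20 ms.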
 HRESULT WASAPIAudioOutputStream::ExclusiveModeInitialization() {
-  DCHECK_EQ(share_mode(), AUDCLNT_SHAREMODE_EXCLUSIVE);
+  DCHECK_EQ(share_mode_, AUDCLNT_SHAREMODE_EXCLUSIVE);

-  float f = (1000.0 * packet_size_frames_) / format_.nSamplesPerSec;
+  float f = (1000.0 * packet_size_frames_) / format_.Format.nSamplesPerSec;
   REFERENCE_TIME requested_buffer_duration =
       static_cast<REFERENCE_TIME>(f * 10000.0 + 0.5);

   // Initialize the audio stream between the client and the device.
   // For an exclusive-mode stream that uses event-driven buffering, the
   // caller must specify nonzero values for hnsPeriodicity and
   // hnsBufferDuration, and the values of these two parameters must be equal.
   // The Initialize method allocates two buffers for the stream. Each buffer
   // is equal in duration to the value of the hnsBufferDuration parameter.
   // Following the Initialize call for a rendering stream, the caller should
   // fill the first of the two buffers before starting the stream.
-  HRESULT hr = audio_client_->Initialize(AUDCLNT_SHAREMODE_EXCLUSIVE,
-                                         AUDCLNT_STREAMFLAGS_EVENTCALLBACK |
-                                         AUDCLNT_STREAMFLAGS_NOPERSIST,
-                                         requested_buffer_duration,
-                                         requested_buffer_duration,
-                                         &format_,
-                                         NULL);
+  HRESULT hr = S_FALSE;
+  hr = audio_client_->Initialize(AUDCLNT_SHAREMODE_EXCLUSIVE,
+                                 AUDCLNT_STREAMFLAGS_EVENTCALLBACK |
+                                 AUDCLNT_STREAMFLAGS_NOPERSIST,
+                                 requested_buffer_duration,
+                                 requested_buffer_duration,
+                                 reinterpret_cast<WAVEFORMATEX*>(&format_),
+                                 NULL);
   if (FAILED(hr)) {
     if (hr == AUDCLNT_E_BUFFER_SIZE_NOT_ALIGNED) {
       LOG(ERROR) << "AUDCLNT_E_BUFFER_SIZE_NOT_ALIGNED";

       UINT32 aligned_buffer_size = 0;
       audio_client_->GetBufferSize(&aligned_buffer_size);
       DVLOG(1) << "Use aligned buffer size instead: " << aligned_buffer_size;
       audio_client_.Release();

       // Calculate new aligned periodicity. Each unit of reference time
       // is 100 nanoseconds.
       REFERENCE_TIME aligned_buffer_duration = static_cast<REFERENCE_TIME>(
-          (10000000.0 * aligned_buffer_size / format_.nSamplesPerSec) + 0.5);
+          (10000000.0 * aligned_buffer_size / format_.Format.nSamplesPerSec)
+          + 0.5);

       // It is possible to re-activate and re-initialize the audio client
       // at this stage but we bail out with an error code instead and
       // combine it with a log message which informs about the suggested
       // aligned buffer size which should be used instead.
       DVLOG(1) << "aligned_buffer_duration: "
                << static_cast<double>(aligned_buffer_duration / 10000.0)
                << " [ms]";
     } else if (hr == AUDCLNT_E_INVALID_DEVICE_PERIOD) {
       // We will get this error if we try to use a smaller buffer size than
(...skipping 19 matching lines...)
   NOTREACHED() << "IMMNotificationClient should not use this method.";
   if (iid == IID_IUnknown || iid == __uuidof(IMMNotificationClient)) {
     *object = static_cast<IMMNotificationClient*>(this);
   } else {
     return E_NOINTERFACE;
   }
   return S_OK;
 }

 STDMETHODIMP WASAPIAudioOutputStream::OnDeviceStateChanged(LPCWSTR device_id,
                                                            DWORD new_state) {
 #ifndef NDEBUG
   std::string device_name = GetDeviceName(device_id);
   std::string device_state;

   switch (new_state) {
     case DEVICE_STATE_ACTIVE:
       device_state = "ACTIVE";
       break;
     case DEVICE_STATE_DISABLED:
       device_state = "DISABLED";
       break;
     case DEVICE_STATE_NOTPRESENT:
       device_state = "NOTPRESENT";
       break;
     case DEVICE_STATE_UNPLUGGED:
       device_state = "UNPLUGGED";
       break;
     default:
       break;
   }

   DVLOG(1) << "-> State changed to " << device_state
            << " for device: " << device_name;
 #endif
   return S_OK;
 }

-HRESULT WASAPIAudioOutputStream::OnDefaultDeviceChanged(EDataFlow flow,
-    ERole role, LPCWSTR new_default_device_id) {
+HRESULT WASAPIAudioOutputStream::OnDefaultDeviceChanged(
+    EDataFlow flow, ERole role, LPCWSTR new_default_device_id) {
   if (new_default_device_id == NULL) {
     // The user has removed or disabled the default device for our
     // particular role, and no other device is available to take that role.
     DLOG(ERROR) << "All devices are disabled.";
     return E_FAIL;
   }

   if (flow == eRender && role == device_role_) {
     // Log the name of the new default device for our configured role.
     std::string new_default_device = GetDeviceName(new_default_device_id);
(...skipping 94 matching lines...)
   // are now re-initiated and it is now possible to re-start audio rendering.

   // Start rendering again using the new default audio endpoint.
   hr = audio_client_->Start();

   restart_rendering_mode_ = false;
   return SUCCEEDED(hr);
 }

 }  // namespace media
