OLD | NEW |
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "content/renderer/media/webrtc_audio_device_impl.h" | 5 #include "content/renderer/media/webrtc_audio_device_impl.h" |
6 | 6 |
7 #include "base/bind.h" | 7 #include "base/bind.h" |
8 #include "base/metrics/histogram.h" | 8 #include "base/metrics/histogram.h" |
9 #include "base/string_util.h" | 9 #include "base/string_util.h" |
10 #include "base/win/windows_version.h" | 10 #include "base/win/windows_version.h" |
11 #include "content/renderer/media/audio_device_factory.h" | 11 #include "content/renderer/media/audio_device_factory.h" |
12 #include "content/renderer/media/audio_hardware.h" | 12 #include "content/renderer/media/audio_hardware.h" |
13 #include "content/renderer/render_thread_impl.h" | 13 #include "content/renderer/render_thread_impl.h" |
14 #include "media/audio/audio_parameters.h" | 14 #include "media/audio/audio_parameters.h" |
15 #include "media/audio/audio_util.h" | 15 #include "media/audio/audio_util.h" |
16 #include "media/audio/sample_rates.h" | 16 #include "media/audio/sample_rates.h" |
17 | 17 |
18 using content::AudioDeviceFactory; | 18 using content::AudioDeviceFactory; |
19 using media::AudioParameters; | 19 using media::AudioParameters; |
| 20 using media::ChannelLayout; |
20 | 21 |
21 static const int64 kMillisecondsBetweenProcessCalls = 5000; | 22 static const int64 kMillisecondsBetweenProcessCalls = 5000; |
22 static const double kMaxVolumeLevel = 255.0; | 23 static const double kMaxVolumeLevel = 255.0; |
23 | 24 |
24 // Supported hardware sample rates for input and output sides. | 25 // Supported hardware sample rates for input and output sides. |
25 #if defined(OS_WIN) || defined(OS_MACOSX) | 26 #if defined(OS_WIN) || defined(OS_MACOSX) |
26 // media::GetAudioInput[Output]HardwareSampleRate() asks the audio layer | 27 // media::GetAudioInput[Output]HardwareSampleRate() asks the audio layer |
27 // for its current sample rate (set by the user) on Windows and Mac OS X. | 28 // for its current sample rate (set by the user) on Windows and Mac OS X. |
28 // The rates listed below add restrictions and WebRtcAudioDeviceImpl::Init() | 29 // The rates listed below add restrictions and WebRtcAudioDeviceImpl::Init() |
29 // will fail if the user selects any rate outside these ranges. | 30 // will fail if the user selects any rate outside these ranges. |
(...skipping 415 matching lines...)
445 in_sample_rate) == | 446 in_sample_rate) == |
446 &kValidInputRates[arraysize(kValidInputRates)]) { | 447 &kValidInputRates[arraysize(kValidInputRates)]) { |
447 DLOG(ERROR) << in_sample_rate << " is not a supported input rate."; | 448 DLOG(ERROR) << in_sample_rate << " is not a supported input rate."; |
448 return -1; | 449 return -1; |
449 } | 450 } |
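For reference, the truncated expression above is a whitelist check against a static table of allowed rates whose definition is elided from this diff. A minimal standalone sketch of the same idiom (table name and contents here are illustrative only; arraysize() is Chromium's macro from base/basictypes.h):

    #include <algorithm>

    // Illustrative table only; the real kValidInputRates is defined near the
    // top of this file.
    static const int kExampleValidInputRates[] = { 96000, 48000, 44100 };

    static bool IsSupportedInputRate(int rate) {
      const int* begin = &kExampleValidInputRates[0];
      const int* end =
          &kExampleValidInputRates[arraysize(kExampleValidInputRates)];
      return std::find(begin, end, rate) != end;
    }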
450 | 451 |
451 // Ask the browser for the default number of audio input channels. | 452 // Ask the browser for the default number of audio input channels. |
452 // This request is based on a synchronous IPC message. | 453 // This request is based on a synchronous IPC message. |
453 ChannelLayout in_channel_layout = audio_hardware::GetInputChannelLayout(); | 454 ChannelLayout in_channel_layout = audio_hardware::GetInputChannelLayout(); |
454 DVLOG(1) << "Audio input hardware channels: " << in_channel_layout; | 455 DVLOG(1) << "Audio input hardware channels: " << in_channel_layout; |
455 ChannelLayout out_channel_layout = CHANNEL_LAYOUT_MONO; | 456 ChannelLayout out_channel_layout = media::CHANNEL_LAYOUT_MONO; |
456 | 457 |
457 AudioParameters::Format in_format = AudioParameters::AUDIO_PCM_LINEAR; | 458 AudioParameters::Format in_format = AudioParameters::AUDIO_PCM_LINEAR; |
458 int in_buffer_size = 0; | 459 int in_buffer_size = 0; |
459 int out_buffer_size = 0; | 460 int out_buffer_size = 0; |
460 | 461 |
461 // TODO(henrika): factor out all platform-specific parts into separate | 462 // TODO(henrika): factor out all platform-specific parts into separate |
462 // functions. The code is a bit messy right now. | 463 // functions. The code is a bit messy right now. |
463 | 464 |
464 // Windows | 465 // Windows |
465 #if defined(OS_WIN) | 466 #if defined(OS_WIN) |
466 // Always use stereo rendering on Windows. | 467 // Always use stereo rendering on Windows. |
467 out_channel_layout = CHANNEL_LAYOUT_STEREO; | 468 out_channel_layout = media::CHANNEL_LAYOUT_STEREO; |
468 | 469 |
469 DVLOG(1) << "Using AUDIO_PCM_LOW_LATENCY as input mode on Windows."; | 470 DVLOG(1) << "Using AUDIO_PCM_LOW_LATENCY as input mode on Windows."; |
470 in_format = AudioParameters::AUDIO_PCM_LOW_LATENCY; | 471 in_format = AudioParameters::AUDIO_PCM_LOW_LATENCY; |
471 | 472 |
472 // Capture side: AUDIO_PCM_LOW_LATENCY is based on the Core Audio (WASAPI) | 473 // Capture side: AUDIO_PCM_LOW_LATENCY is based on the Core Audio (WASAPI) |
473 // API which was introduced in Windows Vista. For lower Windows versions, | 474 // API which was introduced in Windows Vista. For lower Windows versions, |
474 // a callback-driven Wave implementation is used instead. An input buffer | 475 // a callback-driven Wave implementation is used instead. An input buffer |
475 // size of 10ms works well for both these implementations. | 476 // size of 10ms works well for both these implementations. |
476 | 477 |
477 // Use different buffer sizes depending on the current hardware sample rate. | 478 // Use different buffer sizes depending on the current hardware sample rate. |
(...skipping 26 matching lines...)
504 // Windows XP and lower can't cope with 10 ms output buffer size. | 505 // Windows XP and lower can't cope with 10 ms output buffer size. |
505 // It must be extended to 30 ms (60 ms will be used internally by WaveOut). | 506 // It must be extended to 30 ms (60 ms will be used internally by WaveOut). |
506 if (!media::IsWASAPISupported()) { | 507 if (!media::IsWASAPISupported()) { |
507 out_buffer_size = 3 * out_buffer_size; | 508 out_buffer_size = 3 * out_buffer_size; |
508 DLOG(WARNING) << "Extending the output buffer size by a factor of three " | 509 DLOG(WARNING) << "Extending the output buffer size by a factor of three " |
509 << "since Windows XP has been detected."; | 510 << "since Windows XP has been detected."; |
510 } | 511 } |
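The concrete per-rate buffer sizes live in the elided block above; as a rough sanity check, the frame counts follow from the 10 ms target and the tripling on pre-Vista systems (illustrative arithmetic, not code from this CL):

    // Frames in a 10 ms buffer at a given hardware rate.
    static int FramesPer10Ms(int sample_rate_hz) {
      return sample_rate_hz / 100;  // 48000 Hz -> 480 frames, 96000 Hz -> 960.
    }

    // Without WASAPI (Windows XP and lower) the 10 ms output buffer is
    // extended to 30 ms, e.g. 480 -> 3 * 480 = 1440 frames at 48 kHz.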
511 | 512 |
512 // Mac OS X | 513 // Mac OS X |
513 #elif defined(OS_MACOSX) | 514 #elif defined(OS_MACOSX) |
514 out_channel_layout = CHANNEL_LAYOUT_MONO; | 515 out_channel_layout = media::CHANNEL_LAYOUT_MONO; |
515 | 516 |
516 DVLOG(1) << "Using AUDIO_PCM_LOW_LATENCY as input mode on Mac OS X."; | 517 DVLOG(1) << "Using AUDIO_PCM_LOW_LATENCY as input mode on Mac OS X."; |
517 in_format = AudioParameters::AUDIO_PCM_LOW_LATENCY; | 518 in_format = AudioParameters::AUDIO_PCM_LOW_LATENCY; |
518 | 519 |
519 // Capture side: AUDIO_PCM_LOW_LATENCY on Mac OS X is based on a callback- | 520 // Capture side: AUDIO_PCM_LOW_LATENCY on Mac OS X is based on a callback- |
520 // driven Core Audio implementation. Tests have shown that 10ms is a suitable | 521 // driven Core Audio implementation. Tests have shown that 10ms is a suitable |
521 // frame size to use, both for 48kHz and 44.1kHz. | 522 // frame size to use, both for 48kHz and 44.1kHz. |
522 | 523 |
523 // Use different buffer sizes depending on the current hardware sample rate. | 524 // Use different buffer sizes depending on the current hardware sample rate. |
524 if (in_sample_rate == 44100) { | 525 if (in_sample_rate == 44100) { |
(...skipping 13 matching lines...)
538 // Use different buffer sizes depending on the current hardware sample rate. | 539 // Use different buffer sizes depending on the current hardware sample rate. |
539 if (out_sample_rate == 48000) { | 540 if (out_sample_rate == 48000) { |
540 out_buffer_size = 480; | 541 out_buffer_size = 480; |
541 } else { | 542 } else { |
542 // We do run at 44.1kHz at the actual audio layer, but ask for frames | 543 // We do run at 44.1kHz at the actual audio layer, but ask for frames |
543 // at 44.0kHz to ensure that we can feed them to the webrtc::VoiceEngine. | 544 // at 44.0kHz to ensure that we can feed them to the webrtc::VoiceEngine. |
544 out_buffer_size = 440; | 545 out_buffer_size = 440; |
545 } | 546 } |
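Worked out, the two branches both aim for 10 ms of output per callback (my reading of the comment above, spelled out for clarity):

    //   48000 Hz * 0.010 s = 480 frames
    //   44100 Hz * 0.010 s = 441 frames, but 440 is requested so the stream
    //   is treated as a nominal 44.0 kHz that webrtc::VoiceEngine accepts.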
546 // Linux | 547 // Linux |
547 #elif defined(OS_LINUX) || defined(OS_OPENBSD) | 548 #elif defined(OS_LINUX) || defined(OS_OPENBSD) |
548 in_channel_layout = CHANNEL_LAYOUT_STEREO; | 549 in_channel_layout = media::CHANNEL_LAYOUT_STEREO; |
549 out_channel_layout = CHANNEL_LAYOUT_MONO; | 550 out_channel_layout = media::CHANNEL_LAYOUT_MONO; |
550 | 551 |
551 // Based on tests using the current ALSA implementation in Chrome, we have | 552 // Based on tests using the current ALSA implementation in Chrome, we have |
552 // found that the best combination is 20ms on the input side and 10ms on the | 553 // found that the best combination is 20ms on the input side and 10ms on the |
553 // output side. | 554 // output side. |
554 // TODO(henrika): It might be possible to reduce the input buffer | 555 // TODO(henrika): It might be possible to reduce the input buffer |
555 // size and reduce the delay even more. | 556 // size and reduce the delay even more. |
556 in_buffer_size = 2 * 480; | 557 in_buffer_size = 2 * 480; |
557 out_buffer_size = 480; | 558 out_buffer_size = 480; |
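In the same terms, the Linux/OpenBSD values correspond to 20 ms of capture and 10 ms of render at a 48 kHz rate (illustrative arithmetic only):

    //   in_buffer_size  = 2 * 480 = 960 frames -> 960 / 48000 Hz = 20 ms
    //   out_buffer_size =           480 frames -> 480 / 48000 Hz = 10 ms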
558 #else | 559 #else |
559 DLOG(ERROR) << "Unsupported platform"; | 560 DLOG(ERROR) << "Unsupported platform"; |
560 return -1; | 561 return -1; |
561 #endif | 562 #endif |
562 | 563 |
563 // Store utilized parameters to ensure that we can check them | 564 // Store utilized parameters to ensure that we can check them |
564 // after a successful initialization. | 565 // after a successful initialization. |
565 output_audio_parameters_.Reset( | 566 output_audio_parameters_.Reset( |
566 AudioParameters::AUDIO_PCM_LOW_LATENCY, out_channel_layout, | 567 AudioParameters::AUDIO_PCM_LOW_LATENCY, out_channel_layout, |
567 out_sample_rate, 16, out_buffer_size); | 568 out_sample_rate, 16, out_buffer_size); |
568 | 569 |
569 input_audio_parameters_.Reset( | 570 input_audio_parameters_.Reset( |
570 in_format, in_channel_layout, in_sample_rate, | 571 in_format, in_channel_layout, in_sample_rate, |
571 16, in_buffer_size); | 572 16, in_buffer_size); |
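Given the 16-bit sample assumption used throughout, the byte sizes implied by these parameters can be computed as below (a minimal sketch; BytesPerBuffer() is a hypothetical helper, not part of this CL or of media::AudioParameters):

    // Bytes needed for one buffer, assuming 16-bit (2-byte) samples.
    static int BytesPerBuffer(int frames, int channels) {
      return frames * channels * 2;
    }
    // e.g. 480 stereo output frames -> 480 * 2 * 2 = 1920 bytes.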
572 | 573 |
573 // Create and configure the audio capturing client. | 574 // Create and configure the audio capturing client. |
574 audio_input_device_ = AudioDeviceFactory::NewInputDevice(); | 575 audio_input_device_ = AudioDeviceFactory::NewInputDevice(); |
575 audio_input_device_->Initialize(input_audio_parameters_, this, this); | 576 audio_input_device_->Initialize(input_audio_parameters_, this, this); |
576 | 577 |
577 UMA_HISTOGRAM_ENUMERATION("WebRTC.AudioOutputChannelLayout", | 578 UMA_HISTOGRAM_ENUMERATION("WebRTC.AudioOutputChannelLayout", |
578 out_channel_layout, CHANNEL_LAYOUT_MAX); | 579 out_channel_layout, media::CHANNEL_LAYOUT_MAX); |
579 UMA_HISTOGRAM_ENUMERATION("WebRTC.AudioInputChannelLayout", | 580 UMA_HISTOGRAM_ENUMERATION("WebRTC.AudioInputChannelLayout", |
580 in_channel_layout, CHANNEL_LAYOUT_MAX); | 581 in_channel_layout, media::CHANNEL_LAYOUT_MAX); |
581 AddHistogramFramesPerBuffer(kAudioOutput, out_buffer_size); | 582 AddHistogramFramesPerBuffer(kAudioOutput, out_buffer_size); |
582 AddHistogramFramesPerBuffer(kAudioInput, in_buffer_size); | 583 AddHistogramFramesPerBuffer(kAudioInput, in_buffer_size); |
583 | 584 |
584 // Configure the audio rendering client. | 585 // Configure the audio rendering client. |
585 audio_output_device_->Initialize(output_audio_parameters_, this); | 586 audio_output_device_->Initialize(output_audio_parameters_, this); |
586 | 587 |
587 DCHECK(audio_input_device_); | 588 DCHECK(audio_input_device_); |
588 | 589 |
589 // Allocate local audio buffers based on the parameters above. | 590 // Allocate local audio buffers based on the parameters above. |
590 // It is assumed that each audio sample contains 16 bits and each | 591 // It is assumed that each audio sample contains 16 bits and each |
(...skipping 565 matching lines...)
1156 } | 1157 } |
1157 | 1158 |
1158 int32_t WebRtcAudioDeviceImpl::GetLoudspeakerStatus(bool* enabled) const { | 1159 int32_t WebRtcAudioDeviceImpl::GetLoudspeakerStatus(bool* enabled) const { |
1159 NOTIMPLEMENTED(); | 1160 NOTIMPLEMENTED(); |
1160 return -1; | 1161 return -1; |
1161 } | 1162 } |
1162 | 1163 |
1163 void WebRtcAudioDeviceImpl::SetSessionId(int session_id) { | 1164 void WebRtcAudioDeviceImpl::SetSessionId(int session_id) { |
1164 session_id_ = session_id; | 1165 session_id_ = session_id; |
1165 } | 1166 } |