Chromium Code Reviews
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/renderer/media/webrtc_audio_renderer.h"

#include <algorithm>

#include "base/logging.h"
#include "base/metrics/histogram.h"
#include "base/string_util.h"
#include "content/renderer/media/audio_device_factory.h"
#include "content/renderer/media/audio_hardware.h"
#include "content/renderer/media/webrtc_audio_device_impl.h"
#include "media/audio/audio_util.h"
#include "media/audio/sample_rates.h"

namespace content {

// Supported hardware sample rates for the output side.
#if defined(OS_WIN) || defined(OS_MACOSX)
// media::GetAudioOutputHardwareSampleRate() asks the audio layer for its
// current sample rate (set by the user) on Windows and Mac OS X. The rates
// listed below add restrictions and Initialize() will fail if the user
// selects any rate outside these ranges.
static int kValidOutputRates[] = {96000, 48000, 44100};
#elif defined(OS_LINUX) || defined(OS_OPENBSD)
static int kValidOutputRates[] = {48000, 44100};
#endif

// TODO(xians): Merge the following code to WebRtcAudioCapturer, or remove.
enum AudioFramesPerBuffer {
  k160,
  k320,
  k440,  // WebRTC works internally with 440 audio frames at 44.1kHz.
  k480,
  k640,
  k880,
  k960,
  k1440,
  k1920,
  kUnexpectedAudioBufferSize  // Must always be last!
};

// Helper method to convert integral values to their respective enum values
// above, or kUnexpectedAudioBufferSize if no match exists.
static AudioFramesPerBuffer AsAudioFramesPerBuffer(int frames_per_buffer) {
  switch (frames_per_buffer) {
    case 160: return k160;
    case 320: return k320;
    case 440: return k440;
    case 480: return k480;
    case 640: return k640;
    case 880: return k880;
    case 960: return k960;
    case 1440: return k1440;
    case 1920: return k1920;
  }
  return kUnexpectedAudioBufferSize;
}

static void AddHistogramFramesPerBuffer(int param) {
  AudioFramesPerBuffer afpb = AsAudioFramesPerBuffer(param);
  if (afpb != kUnexpectedAudioBufferSize) {
    UMA_HISTOGRAM_ENUMERATION("WebRTC.AudioOutputFramesPerBuffer",
                              afpb, kUnexpectedAudioBufferSize);
  } else {
    // Report unexpected buffer sizes using a unique histogram name.
    UMA_HISTOGRAM_COUNTS("WebRTC.AudioOutputFramesPerBufferUnexpected", param);
  }
}

wjia (left Chromium), 2012/10/24 22:05:15:
move above stuff into anonymous namespace.

no longer working on chromium, 2012/10/25 10:19:41:
Done.
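
The fix presumably wraps the file-scope helpers above in an anonymous namespace. A minimal sketch of that shape, offered as an assumption since the updated patch set is not shown in this diff:

namespace content {
namespace {

// The AudioFramesPerBuffer enum, AsAudioFramesPerBuffer(), and
// AddHistogramFramesPerBuffer() defined above would move in here; the
// anonymous namespace already gives them internal linkage, so the 'static'
// keyword can be dropped.

}  // namespace

// The WebRtcAudioRenderer member definitions below remain unchanged.
}  // namespace content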

WebRtcAudioRenderer::WebRtcAudioRenderer()
    : state_(UNINITIALIZED) {
}

WebRtcAudioRenderer::~WebRtcAudioRenderer() {
  DCHECK_EQ(state_, UNINITIALIZED);
  buffer_.reset();
}

void WebRtcAudioRenderer::Initialize(
    WebRtcAudioRendererSource* source) {
  sink_ = AudioDeviceFactory::NewOutputDevice();
  DCHECK(sink_);

  // Ask the browser for the default audio output hardware sample-rate.
  // This request is based on a synchronous IPC message.
  int sample_rate = GetAudioOutputSampleRate();
  DVLOG(1) << "Audio output hardware sample rate: " << sample_rate;
  UMA_HISTOGRAM_ENUMERATION("WebRTC.AudioOutputSampleRate",
                            sample_rate, media::kUnexpectedAudioSampleRate);

  // Verify that the reported output hardware sample rate is supported
  // on the current platform.
  if (std::find(&kValidOutputRates[0],
                &kValidOutputRates[0] + arraysize(kValidOutputRates),
                sample_rate) ==
      &kValidOutputRates[arraysize(kValidOutputRates)]) {
    DLOG(ERROR) << sample_rate << " is not a supported output rate.";
    return;
  }

  media::ChannelLayout channel_layout = media::CHANNEL_LAYOUT_STEREO;

  int buffer_size = 0;

// Windows
#if defined(OS_WIN)
  // Always use stereo rendering on Windows.
  channel_layout = media::CHANNEL_LAYOUT_STEREO;

  // Render side: AUDIO_PCM_LOW_LATENCY is based on the Core Audio (WASAPI)
  // API which was introduced in Windows Vista. For lower Windows versions,
  // a callback-driven Wave implementation is used instead. An output buffer
  // size of 10ms works well for WASAPI but 30ms is needed for Wave.

  // Use different buffer sizes depending on the current hardware sample rate.
  if (sample_rate == 96000 || sample_rate == 48000) {
    buffer_size = (sample_rate / 100);
  } else {
    // We do run at 44.1kHz at the actual audio layer, but ask for frames
    // at 44.0kHz to ensure that we can feed them to the webrtc::VoiceEngine.
    // TODO(henrika): figure out why we seem to need 20ms here for glitch-
    // free audio.
    buffer_size = 2 * 440;
  }

  // Windows XP and lower can't cope with 10 ms output buffer size.
  // It must be extended to 30 ms (60 ms will be used internally by WaveOut).
  if (!media::IsWASAPISupported()) {
    buffer_size = 3 * buffer_size;
    DLOG(WARNING) << "Extending the output buffer size by a factor of three "
                  << "since Windows XP has been detected.";
  }
#elif defined(OS_MACOSX)
  channel_layout = media::CHANNEL_LAYOUT_MONO;

  // Render side: AUDIO_PCM_LOW_LATENCY on Mac OS X is based on a callback-
  // driven Core Audio implementation. Tests have shown that 10ms is a suitable
  // frame size to use, both for 48kHz and 44.1kHz.

  // Use different buffer sizes depending on the current hardware sample rate.
  if (sample_rate == 48000) {
    buffer_size = 480;
  } else {
    // We do run at 44.1kHz at the actual audio layer, but ask for frames
    // at 44.0kHz to ensure that we can feed them to the webrtc::VoiceEngine.
    buffer_size = 440;
  }
#elif defined(OS_LINUX) || defined(OS_OPENBSD)
  channel_layout = media::CHANNEL_LAYOUT_MONO;

  // Based on tests using the current ALSA implementation in Chrome, we have
  // found that 10ms buffer size on the output side works fine.
  buffer_size = 480;
#else
  DLOG(ERROR) << "Unsupported platform";
  return;
#endif

  // Store utilized parameters to ensure that we can check them
  // after a successful initialization.
  params_.Reset(media::AudioParameters::AUDIO_PCM_LOW_LATENCY, channel_layout,
                sample_rate, 16, buffer_size);

  // Allocate local audio buffers based on the parameters above.
  // It is assumed that each audio sample contains 16 bits and each
  // audio frame contains one or two audio samples depending on the
  // number of channels.
  buffer_.reset(new int16[params_.frames_per_buffer() * params_.channels()]);

  source_ = source;
  source->SetRenderFormat(params_);

  // Configure the audio rendering client and start the rendering.
  sink_->Initialize(params_, this);

  sink_->Start();

  state_ = PAUSED;

  UMA_HISTOGRAM_ENUMERATION("WebRTC.AudioOutputChannelLayout",
                            channel_layout, media::CHANNEL_LAYOUT_MAX);
  AddHistogramFramesPerBuffer(buffer_size);
}
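
// Note (not part of the CL under review): the buffer sizes chosen in
// Initialize() above translate to latency as latency_ms = 1000 * frames / rate:
//   480 frames  @ 48000 Hz -> 10 ms   (sample_rate / 100)
//   960 frames  @ 96000 Hz -> 10 ms
//   880 frames  @ 44100 Hz -> ~20 ms  (the 2 * 440 Windows case)
//   1440 frames @ 48000 Hz -> 30 ms   (WaveOut on Windows XP, 3 * 480)
//   440 frames  @ 44100 Hz -> ~10 ms  (Mac OS X)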

void WebRtcAudioRenderer::Play() {
  base::AutoLock auto_lock(lock_);
  DCHECK_NE(state_, UNINITIALIZED);

  state_ = PLAYING;
}

void WebRtcAudioRenderer::Pause() {
  base::AutoLock auto_lock(lock_);
  DCHECK_NE(state_, UNINITIALIZED);

  state_ = PAUSED;
}

void WebRtcAudioRenderer::Stop() {
  base::AutoLock auto_lock(lock_);
  if (state_ == UNINITIALIZED)
    return;

  sink_->Stop();

  state_ = UNINITIALIZED;
}

void WebRtcAudioRenderer::SetVolume(float volume) {
  base::AutoLock auto_lock(lock_);
  DCHECK_NE(state_, UNINITIALIZED);
  if (state_ == UNINITIALIZED)
    return;

  sink_->SetVolume(volume);
}

int WebRtcAudioRenderer::Render(media::AudioBus* audio_bus,
                                int audio_delay_milliseconds) {

wjia (left Chromium), 2012/10/24 22:05:15:
indent.

no longer working on chromium, 2012/10/25 10:19:41:
Done.

  {
    base::AutoLock auto_lock(lock_);
    if (state_ == UNINITIALIZED)
      return 0;
  }

  // TODO(xians): memset(buffer_)?
  // We need to keep render data for the |source_| regardless of |state_|,
  // otherwise the data will be buffered up inside |source_|.
  source_->RenderData(reinterpret_cast<uint8*>(buffer_.get()),
                      audio_bus->channels(), audio_bus->frames(),
                      audio_delay_milliseconds);

  {
    base::AutoLock auto_lock(lock_);
    // Return 0 frames to implicitly play out zero.
    if (state_ != PLAYING)
      return 0;
  }

  // Deinterleave each channel and convert to 32-bit floating-point
  // with nominal range -1.0 -> +1.0 to match the callback format.
  audio_bus->FromInterleaved(buffer_.get(), audio_bus->frames(),
                             params_.bits_per_sample() / 8);
  return audio_bus->frames();
}
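
// Note (not part of the CL under review): a rough sketch of what the
// AudioBus::FromInterleaved() call above does with the 16-bit data, based on
// the comment in Render(). Interleaved samples L0 R0 L1 R1 ... are split into
// one float plane per channel and scaled into the nominal -1.0 -> +1.0 range;
// the exact scaling constant used by media::AudioBus may differ slightly from
// the 1/32768 shown here.
//
//   for (int ch = 0; ch < channels; ++ch) {
//     float* plane = audio_bus->channel(ch);
//     for (int i = 0; i < frames; ++i)
//       plane[i] = interleaved[i * channels + ch] / 32768.0f;
//   }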

void WebRtcAudioRenderer::OnRenderError() {
  NOTIMPLEMENTED();
  LOG(ERROR) << "OnRenderError()";
}

}  // namespace content
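
For readers new to this class, the state machine above (UNINITIALIZED -> PAUSED -> PLAYING) implies a caller-side sequence roughly like the sketch below. It is illustrative only: the ref-counted handle and the WebRtcAudioDeviceImpl example source are assumptions based on the interfaces used in this file, not code from this CL.

// Hypothetical caller-side sketch; |source| is any WebRtcAudioRendererSource
// implementation, e.g. WebRtcAudioDeviceImpl (assumption, not from this CL).
void RunRendererLifecycle(WebRtcAudioRendererSource* source) {
  scoped_refptr<WebRtcAudioRenderer> renderer(new WebRtcAudioRenderer());
  renderer->Initialize(source);  // UNINITIALIZED -> PAUSED; the sink starts.
  renderer->Play();              // PAUSED -> PLAYING; Render() now outputs data.
  renderer->SetVolume(0.5f);     // Forwarded to the audio sink.
  renderer->Pause();             // PLAYING -> PAUSED; Render() returns 0 frames.
  renderer->Stop();              // Back to UNINITIALIZED; the sink is stopped.
}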