| OLD | NEW |
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "content/renderer/media/webrtc_audio_capturer.h" | 5 #include "content/renderer/media/webrtc_audio_capturer.h" |
| 6 | 6 |
| 7 #include "base/bind.h" | 7 #include "base/bind.h" |
| 8 #include "base/logging.h" | 8 #include "base/logging.h" |
| 9 #include "base/metrics/histogram.h" | 9 #include "base/metrics/histogram.h" |
| 10 #include "base/string_util.h" | 10 #include "base/string_util.h" |
| (...skipping 26 matching lines...) Expand all Loading... |
| 37 int buffer_size = 0; | 37 int buffer_size = 0; |
| 38 #if defined(OS_WIN) || defined(OS_MACOSX) | 38 #if defined(OS_WIN) || defined(OS_MACOSX) |
| 39 // Use different buffer sizes depending on the current hardware sample rate. | 39 // Use different buffer sizes depending on the current hardware sample rate. |
| 40 if (sample_rate == 44100) { | 40 if (sample_rate == 44100) { |
| 41 // We do run at 44.1kHz at the actual audio layer, but ask for frames | 41 // We do run at 44.1kHz at the actual audio layer, but ask for frames |
| 42 // at 44.0kHz to ensure that we can feed them to the webrtc::VoiceEngine. | 42 // at 44.0kHz to ensure that we can feed them to the webrtc::VoiceEngine. |
| 43 buffer_size = 440; | 43 buffer_size = 440; |
| 44 } else { | 44 } else { |
| 45 buffer_size = (sample_rate / 100); | 45 buffer_size = (sample_rate / 100); |
| 46 DCHECK_EQ(buffer_size * 100, sample_rate) << | 46 DCHECK_EQ(buffer_size * 100, sample_rate) << |
| 47 "Sample rate not supported. Should have been caught in Init()."; | 47 "Sample rate not supported"; |
| 48 } | 48 } |
| 49 #elif defined(OS_LINUX) || defined(OS_OPENBSD) | 49 #elif defined(OS_LINUX) || defined(OS_OPENBSD) |
| 50 // Based on tests using the current ALSA implementation in Chrome, we have | 50 // Based on tests using the current ALSA implementation in Chrome, we have |
| 51 // found that the best combination is 20ms on the input side and 10ms on the | 51 // found that the best combination is 20ms on the input side and 10ms on the |
| 52 // output side. | 52 // output side. |
| 53 // TODO(henrika): It might be possible to reduce the input buffer | 53 // TODO(henrika): It might be possible to reduce the input buffer |
| 54 // size and reduce the delay even more. | 54 // size and reduce the delay even more. |
| 55 buffer_size = 2 * sample_rate / 100; | 55 buffer_size = 2 * sample_rate / 100; |
| 56 #endif | 56 #endif |
| 57 | 57 |
| 58 return buffer_size; | 58 return buffer_size; |
| 59 } | 59 } |
| 60 | 60 |
| 61 // static | 61 // static |
| 62 scoped_refptr<WebRtcAudioCapturer> WebRtcAudioCapturer::CreateCapturer() { | 62 scoped_refptr<WebRtcAudioCapturer> WebRtcAudioCapturer::CreateCapturer() { |
| 63 scoped_refptr<WebRtcAudioCapturer> capturer = new WebRtcAudioCapturer(); | 63 scoped_refptr<WebRtcAudioCapturer> capturer = new WebRtcAudioCapturer(); |
| 64 if (capturer->Initialize()) | 64 return capturer; |
| 65 return capturer; | 65 } |
| 66 | 66 |
| 67 return NULL; | 67 bool WebRtcAudioCapturer::Initialize(media::ChannelLayout channel_layout, |
| 68 int sample_rate) { |
| 69 DCHECK(thread_checker_.CalledOnValidThread()); |
| 70 DCHECK(!sinks_.empty()); |
| 71 DVLOG(1) << "WebRtcAudioCapturer::Initialize()"; |
| 72 |
| 73 media::AudioParameters::Format format = |
| 74 media::AudioParameters::AUDIO_PCM_LOW_LATENCY; |
| 75 |
| 76 DVLOG(1) << "Audio input hardware channel layout: " << channel_layout; |
| 77 UMA_HISTOGRAM_ENUMERATION("WebRTC.AudioInputChannelLayout", |
| 78 channel_layout, media::CHANNEL_LAYOUT_MAX); |
| 79 |
| 80 // Verify that the reported input channel configuration is supported. |
| 81 if (channel_layout != media::CHANNEL_LAYOUT_MONO && |
| 82 channel_layout != media::CHANNEL_LAYOUT_STEREO) { |
| 83 DLOG(ERROR) << channel_layout |
| 84 << " is not a supported input channel configuration."; |
| 85 return false; |
| 86 } |
| 87 |
| 88 DVLOG(1) << "Audio input hardware sample rate: " << sample_rate; |
| 89 UMA_HISTOGRAM_ENUMERATION("WebRTC.AudioInputSampleRate", |
| 90 sample_rate, media::kUnexpectedAudioSampleRate); |
| 91 |
| 92 // Verify that the reported input hardware sample rate is supported |
| 93 // on the current platform. |
| 94 if (std::find(&kValidInputRates[0], |
| 95 &kValidInputRates[0] + arraysize(kValidInputRates), |
| 96 sample_rate) == |
| 97 &kValidInputRates[arraysize(kValidInputRates)]) { |
| 98 DLOG(ERROR) << sample_rate << " is not a supported input rate."; |
| 99 return false; |
| 100 } |
| 101 |
| 102 int buffer_size = GetBufferSizeForSampleRate(sample_rate); |
| 103 |
| 104 // Configure audio parameters for the default source. |
| 105 params_.Reset(format, channel_layout, sample_rate, 16, buffer_size); |
| 106 |
| 107 // Tell all sinks which format we use. |
| 108 for (SinkList::const_iterator it = sinks_.begin(); |
| 109 it != sinks_.end(); ++it) { |
| 110 (*it)->SetCaptureFormat(params_); |
| 111 } |
| 112 |
| 113 buffer_.reset(new int16[params_.frames_per_buffer() * params_.channels()]); |
| 114 |
| 115 // Create and configure the default audio capturing source. The |source_| |
| 116 // will be overwritten if an external client later calls SetCapturerSource() |
| 117 // providing an alternative media::AudioCapturerSource. |
| 118 SetCapturerSource(AudioDeviceFactory::NewInputDevice(), |
| 119 channel_layout, |
| 120 static_cast<float>(sample_rate)); |
| 121 |
| 122 return true; |
| 68 } | 123 } |
| 69 | 124 |
| 70 WebRtcAudioCapturer::WebRtcAudioCapturer() | 125 WebRtcAudioCapturer::WebRtcAudioCapturer() |
| 71 : source_(NULL), | 126 : source_(NULL), |
| 72 running_(false), | 127 running_(false), |
| 73 buffering_(false) { | 128 buffering_(false) { |
| 129 DVLOG(1) << "WebRtcAudioCapturer::WebRtcAudioCapturer()"; |
| 74 } | 130 } |
| 75 | 131 |
| 76 WebRtcAudioCapturer::~WebRtcAudioCapturer() { | 132 WebRtcAudioCapturer::~WebRtcAudioCapturer() { |
| 133 DCHECK(thread_checker_.CalledOnValidThread()); |
| 77 DCHECK(sinks_.empty()); | 134 DCHECK(sinks_.empty()); |
| 78 DCHECK(!loopback_fifo_); | 135 DCHECK(!loopback_fifo_); |
| 136 DVLOG(1) << "WebRtcAudioCapturer::~WebRtcAudioCapturer()"; |
| 79 } | 137 } |
| 80 | 138 |
| 81 void WebRtcAudioCapturer::AddCapturerSink(WebRtcAudioCapturerSink* sink) { | 139 void WebRtcAudioCapturer::AddCapturerSink(WebRtcAudioCapturerSink* sink) { |
| 82 { | 140 DCHECK(thread_checker_.CalledOnValidThread()); |
| 83 base::AutoLock auto_lock(lock_); | 141 DVLOG(1) << "WebRtcAudioCapturer::AddCapturerSink()"; |
| 84 DCHECK(std::find(sinks_.begin(), sinks_.end(), sink) == sinks_.end()); | 142 base::AutoLock auto_lock(lock_); |
| 85 sinks_.push_back(sink); | 143 DCHECK(std::find(sinks_.begin(), sinks_.end(), sink) == sinks_.end()); |
| 86 } | 144 sinks_.push_back(sink); |
| 87 | |
| 88 // Tell the |sink| which format we use. | |
| 89 sink->SetCaptureFormat(params_); | |
| 90 } | 145 } |
| 91 | 146 |
| 92 void WebRtcAudioCapturer::RemoveCapturerSink(WebRtcAudioCapturerSink* sink) { | 147 void WebRtcAudioCapturer::RemoveCapturerSink(WebRtcAudioCapturerSink* sink) { |
| 148 DCHECK(thread_checker_.CalledOnValidThread()); |
| 149 DVLOG(1) << "WebRtcAudioCapturer::RemoveCapturerSink()"; |
| 93 base::AutoLock auto_lock(lock_); | 150 base::AutoLock auto_lock(lock_); |
| 94 for (SinkList::iterator it = sinks_.begin(); it != sinks_.end(); ++it) { | 151 for (SinkList::iterator it = sinks_.begin(); it != sinks_.end(); ++it) { |
| 95 if (sink == *it) { | 152 if (sink == *it) { |
| 96 sinks_.erase(it); | 153 sinks_.erase(it); |
| 97 break; | 154 break; |
| 98 } | 155 } |
| 99 } | 156 } |
| 100 } | 157 } |
| 101 | 158 |
| 102 void WebRtcAudioCapturer::SetCapturerSource( | 159 void WebRtcAudioCapturer::SetCapturerSource( |
| 103 const scoped_refptr<media::AudioCapturerSource>& source, | 160 const scoped_refptr<media::AudioCapturerSource>& source, |
| 104 media::ChannelLayout channel_layout, | 161 media::ChannelLayout channel_layout, |
| 105 float sample_rate) { | 162 float sample_rate) { |
| 106 DVLOG(1) << "SetCapturerSource()"; | 163 DVLOG(1) << "SetCapturerSource(channel_layout=" << channel_layout << "," |
| 164 << "sample_rate=" << sample_rate << ")"; |
| 107 scoped_refptr<media::AudioCapturerSource> old_source; | 165 scoped_refptr<media::AudioCapturerSource> old_source; |
| 108 { | 166 { |
| 109 base::AutoLock auto_lock(lock_); | 167 base::AutoLock auto_lock(lock_); |
| 110 if (source_ == source) | 168 if (source_ == source) |
| 111 return; | 169 return; |
| 112 | 170 |
| 113 source_.swap(old_source); | 171 source_.swap(old_source); |
| 114 source_ = source; | 172 source_ = source; |
| 115 } | 173 } |
| 116 | 174 |
| 117 // Detach the old source from normal recording. | 175 const bool no_default_audio_source_exists = !buffer_.get(); |
| 118 if (old_source) { | 176 |
| 119 old_source->Stop(); | 177 // Detach the old source from normal recording or perform first-time |
| 178 // initialization if Initialize() has never been called. For the second |
| 179 // case, the caller is not "taking over an ongoing session" but instead |
| 180 // "taking control over a new session". |
| 181 if (old_source || no_default_audio_source_exists) { |
| 182 DVLOG(1) << "New capture source will now be utilized."; |
| 183 if (old_source) |
| 184 old_source->Stop(); |
| 120 | 185 |
| 121 // Dispatch the new parameters both to the sink(s) and to the new source. | 186 // Dispatch the new parameters both to the sink(s) and to the new source. |
| 122 // The idea is to get rid of any dependency of the microphone parameters | 187 // The idea is to get rid of any dependency of the microphone parameters |
| 123 // which would normally be used by default. | 188 // which would normally be used by default. |
| 124 | 189 |
| 125 int buffer_size = GetBufferSizeForSampleRate(sample_rate); | 190 int buffer_size = GetBufferSizeForSampleRate(sample_rate); |
| 126 if (!buffer_size) { | 191 if (!buffer_size) { |
| 127 DLOG(ERROR) << "Unsupported sample-rate: " << sample_rate; | 192 DLOG(ERROR) << "Unsupported sample-rate: " << sample_rate; |
| 128 return; | 193 return; |
| 129 } | 194 } |
| (...skipping 11 matching lines...) Expand all Loading... |
| 141 (*it)->SetCaptureFormat(params_); | 206 (*it)->SetCaptureFormat(params_); |
| 142 } | 207 } |
| 143 } | 208 } |
| 144 | 209 |
| 145 if (source) | 210 if (source) |
| 146 source->Initialize(params_, this, this); | 211 source->Initialize(params_, this, this); |
| 147 } | 212 } |
| 148 | 213 |
| 149 void WebRtcAudioCapturer::SetStopCallback( | 214 void WebRtcAudioCapturer::SetStopCallback( |
| 150 const base::Closure& on_device_stopped_cb) { | 215 const base::Closure& on_device_stopped_cb) { |
| 216 DCHECK(thread_checker_.CalledOnValidThread()); |
| 151 DVLOG(1) << "WebRtcAudioCapturer::SetStopCallback()"; | 217 DVLOG(1) << "WebRtcAudioCapturer::SetStopCallback()"; |
| 152 base::AutoLock auto_lock(lock_); | 218 base::AutoLock auto_lock(lock_); |
| 153 on_device_stopped_cb_ = on_device_stopped_cb; | 219 on_device_stopped_cb_ = on_device_stopped_cb; |
| 154 } | 220 } |
| 155 | 221 |
| 156 void WebRtcAudioCapturer::PrepareLoopback() { | 222 void WebRtcAudioCapturer::PrepareLoopback() { |
| 223 DCHECK(thread_checker_.CalledOnValidThread()); |
| 157 DVLOG(1) << "WebRtcAudioCapturer::PrepareLoopback()"; | 224 DVLOG(1) << "WebRtcAudioCapturer::PrepareLoopback()"; |
| 158 base::AutoLock auto_lock(lock_); | 225 base::AutoLock auto_lock(lock_); |
| 159 DCHECK(!loopback_fifo_); | 226 DCHECK(!loopback_fifo_); |
| 160 | 227 |
| 161 // TODO(henrika): we could add a more dynamic solution here but I prefer | 228 // TODO(henrika): we could add a more dynamic solution here but I prefer |
| 162 // a fixed size combined with bad audio at overflow. The alternative is | 229 // a fixed size combined with bad audio at overflow. The alternative is |
| 163 // that we start to build up latency and that can be more difficult to | 230 // that we start to build up latency and that can be more difficult to |
| 164 // detect. Tests have shown that the FIFO never contains more than 2 or 3 | 231 // detect. Tests have shown that the FIFO never contains more than 2 or 3 |
| 165 // audio frames but I have selected a max size of ten buffers just | 232 // audio frames but I have selected a max size of ten buffers just |
| 166 // in case since these tests were performed on a 16 core, 64GB Win 7 | 233 // in case since these tests were performed on a 16 core, 64GB Win 7 |
| 167 // machine. We could also add some sort of error notifier in this area if | 234 // machine. We could also add some sort of error notifier in this area if |
| 168 // the FIFO overflows. | 235 // the FIFO overflows. |
| 169 loopback_fifo_.reset(new media::AudioFifo(params_.channels(), | 236 loopback_fifo_.reset(new media::AudioFifo(params_.channels(), |
| 170 10 * params_.frames_per_buffer())); | 237 10 * params_.frames_per_buffer())); |
| 171 buffering_ = true; | 238 buffering_ = true; |
| 172 } | 239 } |
| 173 | 240 |
| 174 void WebRtcAudioCapturer::CancelLoopback() { | 241 void WebRtcAudioCapturer::CancelLoopback() { |
| 242 DCHECK(thread_checker_.CalledOnValidThread()); |
| 175 DVLOG(1) << "WebRtcAudioCapturer::CancelLoopback()"; | 243 DVLOG(1) << "WebRtcAudioCapturer::CancelLoopback()"; |
| 176 base::AutoLock auto_lock(lock_); | 244 base::AutoLock auto_lock(lock_); |
| 177 buffering_ = false; | 245 buffering_ = false; |
| 178 if (loopback_fifo_.get() != NULL) { | 246 if (loopback_fifo_.get() != NULL) { |
| 179 loopback_fifo_->Clear(); | 247 loopback_fifo_->Clear(); |
| 180 loopback_fifo_.reset(); | 248 loopback_fifo_.reset(); |
| 181 } | 249 } |
| 182 } | 250 } |
| 183 | 251 |
| 184 void WebRtcAudioCapturer::PauseBuffering() { | 252 void WebRtcAudioCapturer::PauseBuffering() { |
| 253 DCHECK(thread_checker_.CalledOnValidThread()); |
| 185 DVLOG(1) << "WebRtcAudioCapturer::PauseBuffering()"; | 254 DVLOG(1) << "WebRtcAudioCapturer::PauseBuffering()"; |
| 186 base::AutoLock auto_lock(lock_); | 255 base::AutoLock auto_lock(lock_); |
| 187 buffering_ = false; | 256 buffering_ = false; |
| 188 } | 257 } |
| 189 | 258 |
| 190 void WebRtcAudioCapturer::ResumeBuffering() { | 259 void WebRtcAudioCapturer::ResumeBuffering() { |
| 260 DCHECK(thread_checker_.CalledOnValidThread()); |
| 191 DVLOG(1) << "WebRtcAudioCapturer::ResumeBuffering()"; | 261 DVLOG(1) << "WebRtcAudioCapturer::ResumeBuffering()"; |
| 192 base::AutoLock auto_lock(lock_); | 262 base::AutoLock auto_lock(lock_); |
| 193 if (buffering_) | 263 if (buffering_) |
| 194 return; | 264 return; |
| 195 if (loopback_fifo_.get() != NULL) | 265 if (loopback_fifo_.get() != NULL) |
| 196 loopback_fifo_->Clear(); | 266 loopback_fifo_->Clear(); |
| 197 buffering_ = true; | 267 buffering_ = true; |
| 198 } | 268 } |
| 199 | 269 |
| 200 bool WebRtcAudioCapturer::Initialize() { | |
| 201 DVLOG(1) << "WebRtcAudioCapturer::Initialize()"; | |
| 202 // Ask the browser for the default audio input hardware sample-rate. | |
| 203 // This request is based on a synchronous IPC message. | |
| 204 // TODO(xians): we should ask for the native sample rate of a specific device. | |
| 205 int sample_rate = GetAudioInputSampleRate(); | |
| 206 DVLOG(1) << "Audio input hardware sample rate: " << sample_rate; | |
| 207 UMA_HISTOGRAM_ENUMERATION("WebRTC.AudioInputSampleRate", | |
| 208 sample_rate, media::kUnexpectedAudioSampleRate); | |
| 209 | |
| 210 // Verify that the reported input hardware sample rate is supported | |
| 211 // on the current platform. | |
| 212 if (std::find(&kValidInputRates[0], | |
| 213 &kValidInputRates[0] + arraysize(kValidInputRates), | |
| 214 sample_rate) == | |
| 215 &kValidInputRates[arraysize(kValidInputRates)]) { | |
| 216 DLOG(ERROR) << sample_rate << " is not a supported input rate."; | |
| 217 return false; | |
| 218 } | |
| 219 | |
| 220 // Ask the browser for the default number of audio input channels. | |
| 221 // This request is based on a synchronous IPC message. | |
| 222 // TODO(xians): we should ask for the layout of a specific device. | |
| 223 media::ChannelLayout channel_layout = GetAudioInputChannelLayout(); | |
| 224 DVLOG(1) << "Audio input hardware channels: " << channel_layout; | |
| 225 | |
| 226 media::AudioParameters::Format format = | |
| 227 media::AudioParameters::AUDIO_PCM_LOW_LATENCY; | |
| 228 int buffer_size = GetBufferSizeForSampleRate(sample_rate); | |
| 229 if (!buffer_size) { | |
| 230 DLOG(ERROR) << "Unsupported platform"; | |
| 231 return false; | |
| 232 } | |
| 233 | |
| 234 params_.Reset(format, channel_layout, sample_rate, 16, buffer_size); | |
| 235 | |
| 236 buffer_.reset(new int16[params_.frames_per_buffer() * params_.channels()]); | |
| 237 | |
| 238 // Create and configure the default audio capturing source. The |source_| | |
| 239 // will be overwritten if the client call the source calls | |
| 240 // SetCapturerSource(). | |
| 241 SetCapturerSource( | |
| 242 AudioDeviceFactory::NewInputDevice(), channel_layout, sample_rate); | |
| 243 | |
| 244 UMA_HISTOGRAM_ENUMERATION("WebRTC.AudioInputChannelLayout", | |
| 245 channel_layout, media::CHANNEL_LAYOUT_MAX); | |
| 246 | |
| 247 return true; | |
| 248 } | |
| 249 | |
| 250 void WebRtcAudioCapturer::ProvideInput(media::AudioBus* dest) { | 270 void WebRtcAudioCapturer::ProvideInput(media::AudioBus* dest) { |
| 251 base::AutoLock auto_lock(lock_); | 271 base::AutoLock auto_lock(lock_); |
| 252 DCHECK(loopback_fifo_.get() != NULL); | 272 DCHECK(loopback_fifo_.get() != NULL); |
| 253 | 273 |
| 254 if (!running_) { | 274 if (!running_) { |
| 255 dest->Zero(); | 275 dest->Zero(); |
| 256 return; | 276 return; |
| 257 } | 277 } |
| 258 | 278 |
| 259 // Provide data by reading from the FIFO if the FIFO contains enough | 279 // Provide data by reading from the FIFO if the FIFO contains enough |
| 260 // to fulfill the request. | 280 // to fulfill the request. |
| 261 if (loopback_fifo_->frames() >= dest->frames()) { | 281 if (loopback_fifo_->frames() >= dest->frames()) { |
| 262 loopback_fifo_->Consume(dest, 0, dest->frames()); | 282 loopback_fifo_->Consume(dest, 0, dest->frames()); |
| 263 } else { | 283 } else { |
| 264 dest->Zero(); | 284 dest->Zero(); |
| 265 // This warning is perfectly safe if it happens for the first audio | 285 // This warning is perfectly safe if it happens for the first audio |
| 266 // frames. It should not happen in a steady-state mode. | 286 // frames. It should not happen in a steady-state mode. |
| 267 DLOG(WARNING) << "WARNING: loopback FIFO is empty."; | 287 DVLOG(2) << "WARNING: loopback FIFO is empty."; |
| 268 } | 288 } |
| 269 } | 289 } |
| 270 | 290 |
| 271 void WebRtcAudioCapturer::Start() { | 291 void WebRtcAudioCapturer::Start() { |
| 272 DVLOG(1) << "WebRtcAudioCapturer::Start()"; | 292 DVLOG(1) << "WebRtcAudioCapturer::Start()"; |
| 273 base::AutoLock auto_lock(lock_); | 293 base::AutoLock auto_lock(lock_); |
| 274 if (running_) | 294 if (running_) |
| 275 return; | 295 return; |
| 276 | 296 |
| 277 // What Start() does is supposed to be very light, for example, posting a | 297 // What Start() does is supposed to be very light, for example, posting a |
| (...skipping 23 matching lines...) Expand all Loading... |
| 301 running_ = false; | 321 running_ = false; |
| 302 } | 322 } |
| 303 | 323 |
| 304 if (source) | 324 if (source) |
| 305 source->Stop(); | 325 source->Stop(); |
| 306 } | 326 } |
| 307 | 327 |
| 308 void WebRtcAudioCapturer::SetVolume(double volume) { | 328 void WebRtcAudioCapturer::SetVolume(double volume) { |
| 309 DVLOG(1) << "WebRtcAudioCapturer::SetVolume()"; | 329 DVLOG(1) << "WebRtcAudioCapturer::SetVolume()"; |
| 310 base::AutoLock auto_lock(lock_); | 330 base::AutoLock auto_lock(lock_); |
| 311 | |
| 312 if (source_) | 331 if (source_) |
| 313 source_->SetVolume(volume); | 332 source_->SetVolume(volume); |
| 314 } | 333 } |
| 315 | 334 |
| 316 void WebRtcAudioCapturer::SetDevice(int session_id) { | 335 void WebRtcAudioCapturer::SetDevice(int session_id) { |
| 336 DCHECK(thread_checker_.CalledOnValidThread()); |
| 317 DVLOG(1) << "WebRtcAudioCapturer::SetDevice(" << session_id << ")"; | 337 DVLOG(1) << "WebRtcAudioCapturer::SetDevice(" << session_id << ")"; |
| 318 base::AutoLock auto_lock(lock_); | 338 base::AutoLock auto_lock(lock_); |
| 319 if (source_) | 339 if (source_) |
| 320 source_->SetDevice(session_id); | 340 source_->SetDevice(session_id); |
| 321 } | 341 } |
| 322 | 342 |
| 323 void WebRtcAudioCapturer::SetAutomaticGainControl(bool enable) { | 343 void WebRtcAudioCapturer::SetAutomaticGainControl(bool enable) { |
| 324 base::AutoLock auto_lock(lock_); | 344 base::AutoLock auto_lock(lock_); |
| 325 if (source_) | 345 if (source_) |
| 326 source_->SetAutomaticGainControl(enable); | 346 source_->SetAutomaticGainControl(enable); |
| 327 } | 347 } |
| 328 | 348 |
| 329 bool WebRtcAudioCapturer::IsInLoopbackMode() { | 349 bool WebRtcAudioCapturer::IsInLoopbackMode() { |
| 350 DCHECK(thread_checker_.CalledOnValidThread()); |
| 330 base::AutoLock auto_lock(lock_); | 351 base::AutoLock auto_lock(lock_); |
| 331 return (loopback_fifo_ != NULL); | 352 return (loopback_fifo_ != NULL); |
| 332 } | 353 } |
| 333 | 354 |
| 334 void WebRtcAudioCapturer::Capture(media::AudioBus* audio_source, | 355 void WebRtcAudioCapturer::Capture(media::AudioBus* audio_source, |
| 335 int audio_delay_milliseconds, | 356 int audio_delay_milliseconds, |
| 336 double volume) { | 357 double volume) { |
| 337 // This callback is driven by AudioInputDevice::AudioThreadCallback if | 358 // This callback is driven by AudioInputDevice::AudioThreadCallback if |
| 338 // |source_| is AudioInputDevice, otherwise it is driven by client's | 359 // |source_| is AudioInputDevice, otherwise it is driven by client's |
| 339 // CaptureCallback. | 360 // CaptureCallback. |
| (...skipping 57 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 397 // Inform the local renderer about the stopped device. | 418 // Inform the local renderer about the stopped device. |
| 398 // The renderer can then save resources by not asking for more data from | 419 // The renderer can then save resources by not asking for more data from |
| 399 // the stopped source. We are on the IO thread but the callback task will | 420 // the stopped source. We are on the IO thread but the callback task will |
| 400 // be posted on the message loop of the main render thread thanks to | 421 // be posted on the message loop of the main render thread thanks to |
| 401 // usage of BindToLoop() when the callback was initialized. | 422 // usage of BindToLoop() when the callback was initialized. |
| 402 if (!on_device_stopped_cb_.is_null()) | 423 if (!on_device_stopped_cb_.is_null()) |
| 403 on_device_stopped_cb_.Run(); | 424 on_device_stopped_cb_.Run(); |
| 404 } | 425 } |
| 405 | 426 |
| 406 } // namespace content | 427 } // namespace content |
| OLD | NEW |