Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "content/renderer/media/webrtc_audio_capturer.h" | 5 #include "content/renderer/media/webrtc_audio_capturer.h" |
| 6 | 6 |
| 7 #include "base/bind.h" | 7 #include "base/bind.h" |
| 8 #include "base/logging.h" | 8 #include "base/logging.h" |
| 9 #include "base/macros.h" | 9 #include "base/macros.h" |
| 10 #include "base/metrics/histogram.h" | 10 #include "base/metrics/histogram.h" |
| 11 #include "base/strings/string_util.h" | 11 #include "base/strings/string_util.h" |
| 12 #include "base/strings/stringprintf.h" | 12 #include "base/strings/stringprintf.h" |
| 13 #include "build/build_config.h" | 13 #include "build/build_config.h" |
| 14 #include "content/child/child_process.h" | 14 #include "content/child/child_process.h" |
| 15 #include "content/renderer/media/audio_device_factory.h" | 15 #include "content/renderer/media/audio_device_factory.h" |
| 16 #include "content/renderer/media/media_stream_audio_processor.h" | 16 #include "content/renderer/media/media_stream_audio_processor.h" |
| 17 #include "content/renderer/media/media_stream_audio_processor_options.h" | 17 #include "content/renderer/media/media_stream_audio_processor_options.h" |
| 18 #include "content/renderer/media/media_stream_audio_source.h" | 18 #include "content/renderer/media/media_stream_audio_source.h" |
| 19 #include "content/renderer/media/media_stream_constraints_util.h" | 19 #include "content/renderer/media/media_stream_constraints_util.h" |
| 20 #include "content/renderer/media/webrtc_audio_device_impl.h" | 20 #include "content/renderer/media/webrtc_audio_device_impl.h" |
| 21 #include "content/renderer/media/webrtc_local_audio_track.h" | 21 #include "content/renderer/media/webrtc_local_audio_track.h" |
| 22 #include "content/renderer/media/webrtc_logging.h" | 22 #include "content/renderer/media/webrtc_logging.h" |
| 23 #include "media/audio/sample_rates.h" | 23 #include "media/audio/sample_rates.h" |
| 24 | 24 |
| 25 namespace content { | 25 namespace content { |
| 26 | 26 |
| 27 namespace { | 27 namespace { |
| 28 | 28 |
| 29 // Audio buffer sizes are specified in milliseconds. | |
| 30 const char kAudioLatency[] = "latencyMs"; | |
| 31 const int kMinAudioLatencyMs = 0; | |
| 32 const int kMaxAudioLatencyMs = 10000; | |
| 33 | |
| 34 // Method to check if any of the data in |audio_source| has energy. | 29 // Method to check if any of the data in |audio_source| has energy. |
| 35 bool HasDataEnergy(const media::AudioBus& audio_source) { | 30 bool HasDataEnergy(const media::AudioBus& audio_source) { |

Review comment thread on this line:

> mcasas (2016/02/26 01:28:19): Isn't this method exactly a !audio_source.AreFrame…
>
> miu (2016/02/27 03:46:37): Done.
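The quoted suggestion is cut off above; it presumably refers to media::AudioBus::AreFramesZero(), in which case HasDataEnergy() is simply its negation. The sketch below illustrates that equivalence with a hypothetical vector-of-channels stand-in (FakeBus) rather than the real media::AudioBus API; it is only a self-contained way to see why the two loops agree, not code from this change.

```cpp
// Illustrative only: FakeBus is a made-up stand-in for media::AudioBus.
#include <cassert>
#include <vector>

using FakeBus = std::vector<std::vector<float>>;  // One inner vector per channel.

// Mirrors the "all samples are zero" check the reviewer alludes to.
bool AreFramesZero(const FakeBus& bus) {
  for (const auto& channel : bus)
    for (float sample : channel)
      if (sample != 0.0f)
        return false;
  return true;
}

// Mirrors HasDataEnergy() from the file under review.
bool HasDataEnergy(const FakeBus& bus) {
  for (const auto& channel : bus)
    for (float sample : channel)
      if (sample != 0.0f)
        return true;
  return false;
}

int main() {
  FakeBus silent = {{0.0f, 0.0f}, {0.0f, 0.0f}};
  FakeBus active = {{0.0f, 0.25f}, {0.0f, 0.0f}};
  // HasDataEnergy(x) behaves exactly like !AreFramesZero(x).
  assert(HasDataEnergy(silent) == !AreFramesZero(silent));
  assert(HasDataEnergy(active) == !AreFramesZero(active));
  return 0;
}
```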
| 36 for (int ch = 0; ch < audio_source.channels(); ++ch) { | 31 for (int ch = 0; ch < audio_source.channels(); ++ch) { |
| 37 const float* channel_ptr = audio_source.channel(ch); | 32 const float* channel_ptr = audio_source.channel(ch); |
| 38 for (int frame = 0; frame < audio_source.frames(); ++frame) { | 33 for (int frame = 0; frame < audio_source.frames(); ++frame) { |
| 39 if (channel_ptr[frame] != 0) | 34 if (channel_ptr[frame] != 0) |
| 40 return true; | 35 return true; |
| 41 } | 36 } |
| 42 } | 37 } |
| 43 | 38 |
| 44 // All the data is zero. | 39 // All the data is zero. |
| 45 return false; | 40 return false; |
| 46 } | 41 } |
| 47 | 42 |
| 48 } // namespace | 43 } // namespace |
| 49 | 44 |
| 50 // Reference counted container of WebRtcLocalAudioTrack delegate. | 45 // Reference counted container of WebRtcLocalAudioTrack delegate. |
| 51 // TODO(xians): Switch to MediaStreamAudioSinkOwner. | 46 // TODO(xians): Switch to MediaStreamAudioSinkOwner. |
| 52 class WebRtcAudioCapturer::TrackOwner | 47 class WebRtcAudioCapturer::TrackOwner |
| 53 : public base::RefCountedThreadSafe<WebRtcAudioCapturer::TrackOwner> { | 48 : public base::RefCountedThreadSafe<WebRtcAudioCapturer::TrackOwner> { |
| 54 public: | 49 public: |
| 55 explicit TrackOwner(WebRtcLocalAudioTrack* track) | 50 explicit TrackOwner(WebRtcLocalAudioTrack* track) |
| 56 : delegate_(track) {} | 51 : delegate_(track) {} |
| 57 | 52 |
| 58 void Capture(const media::AudioBus& audio_bus, | 53 void Capture(const media::AudioBus& audio_bus, |
| 59 base::TimeTicks estimated_capture_time, | 54 base::TimeTicks estimated_capture_time) { |
| 60 bool force_report_nonzero_energy) { | |
| 61 base::AutoLock lock(lock_); | 55 base::AutoLock lock(lock_); |
| 62 if (delegate_) { | 56 if (delegate_) { |
| 63 delegate_->Capture(audio_bus, | 57 delegate_->Capture(audio_bus, estimated_capture_time); |
| 64 estimated_capture_time, | |
| 65 force_report_nonzero_energy); | |
| 66 } | 58 } |
| 67 } | 59 } |
| 68 | 60 |
| 69 void OnSetFormat(const media::AudioParameters& params) { | 61 void OnSetFormat(const media::AudioParameters& params) { |
| 70 base::AutoLock lock(lock_); | 62 base::AutoLock lock(lock_); |
| 71 if (delegate_) | 63 if (delegate_) |
| 72 delegate_->OnSetFormat(params); | 64 delegate_->OnSetFormat(params); |
| 73 } | 65 } |
| 74 | 66 |
| 75 void SetAudioProcessor( | 67 void SetAudioProcessor( |
| (...skipping 41 matching lines...) | |
| 117 } | 109 } |
| 118 | 110 |
| 119 // Do NOT reference count the |delegate_| to avoid cyclic reference counting. | 111 // Do NOT reference count the |delegate_| to avoid cyclic reference counting. |
| 120 WebRtcLocalAudioTrack* delegate_; | 112 WebRtcLocalAudioTrack* delegate_; |
| 121 mutable base::Lock lock_; | 113 mutable base::Lock lock_; |
| 122 | 114 |
| 123 DISALLOW_COPY_AND_ASSIGN(TrackOwner); | 115 DISALLOW_COPY_AND_ASSIGN(TrackOwner); |
| 124 }; | 116 }; |
| 125 | 117 |
| 126 // static | 118 // static |
| 127 scoped_refptr<WebRtcAudioCapturer> WebRtcAudioCapturer::CreateCapturer( | 119 scoped_ptr<WebRtcAudioCapturer> WebRtcAudioCapturer::CreateCapturer( |
| 128 int render_frame_id, | 120 int render_frame_id, |
| 129 const StreamDeviceInfo& device_info, | 121 const StreamDeviceInfo& device_info, |
| 130 const blink::WebMediaConstraints& constraints, | 122 const blink::WebMediaConstraints& constraints, |
| 131 WebRtcAudioDeviceImpl* audio_device, | 123 WebRtcAudioDeviceImpl* audio_device, |
| 132 MediaStreamAudioSource* audio_source) { | 124 MediaStreamAudioSource* audio_source) { |
| 133 scoped_refptr<WebRtcAudioCapturer> capturer = new WebRtcAudioCapturer( | 125 scoped_ptr<WebRtcAudioCapturer> capturer(new WebRtcAudioCapturer( |
| 134 render_frame_id, device_info, constraints, audio_device, audio_source); | 126 render_frame_id, device_info, constraints, audio_device, audio_source)); |
| 135 if (capturer->Initialize()) | 127 if (capturer->Initialize()) |
| 136 return capturer; | 128 return capturer; |
| 137 | 129 |
| 138 return NULL; | 130 return NULL; |
| 139 } | 131 } |
| 140 | 132 |
| 141 bool WebRtcAudioCapturer::Initialize() { | 133 bool WebRtcAudioCapturer::Initialize() { |
| 142 DCHECK(thread_checker_.CalledOnValidThread()); | 134 DCHECK(thread_checker_.CalledOnValidThread()); |
| 143 DVLOG(1) << "WebRtcAudioCapturer::Initialize()"; | 135 DVLOG(1) << "WebRtcAudioCapturer::Initialize()"; |
| 144 WebRtcLogMessage(base::StringPrintf( | 136 WebRtcLogMessage(base::StringPrintf( |
| (...skipping 55 matching lines...) | |
| 200 << device_info_.device.input.sample_rate; | 192 << device_info_.device.input.sample_rate; |
| 201 media::AudioSampleRate asr; | 193 media::AudioSampleRate asr; |
| 202 if (media::ToAudioSampleRate(device_info_.device.input.sample_rate, &asr)) { | 194 if (media::ToAudioSampleRate(device_info_.device.input.sample_rate, &asr)) { |
| 203 UMA_HISTOGRAM_ENUMERATION( | 195 UMA_HISTOGRAM_ENUMERATION( |
| 204 "WebRTC.AudioInputSampleRate", asr, media::kAudioSampleRateMax + 1); | 196 "WebRTC.AudioInputSampleRate", asr, media::kAudioSampleRateMax + 1); |
| 205 } else { | 197 } else { |
| 206 UMA_HISTOGRAM_COUNTS("WebRTC.AudioInputSampleRateUnexpected", | 198 UMA_HISTOGRAM_COUNTS("WebRTC.AudioInputSampleRateUnexpected", |
| 207 device_info_.device.input.sample_rate); | 199 device_info_.device.input.sample_rate); |
| 208 } | 200 } |
| 209 | 201 |
| 210 // Initialize the buffer size to zero, which means it wasn't specified. | |
| 211 // If it is out of range, we return it to zero. | |
| 212 int buffer_size_ms = 0; | |
| 213 int buffer_size_samples = 0; | |
| 214 GetConstraintValueAsInteger(constraints_, kAudioLatency, &buffer_size_ms); | |
| 215 if (buffer_size_ms < kMinAudioLatencyMs || | |
| 216 buffer_size_ms > kMaxAudioLatencyMs) { | |
| 217 DVLOG(1) << "Ignoring out of range buffer size " << buffer_size_ms; | |
| 218 } else { | |
| 219 buffer_size_samples = | |
| 220 device_info_.device.input.sample_rate * buffer_size_ms / 1000; | |
| 221 } | |
| 222 DVLOG_IF(1, buffer_size_samples > 0) | |
| 223 << "Custom audio buffer size: " << buffer_size_samples << " samples"; | |
| 224 | |
| 225 // Create and configure the default audio capturing source. | 202 // Create and configure the default audio capturing source. |
| 226 SetCapturerSourceInternal( | 203 SetCapturerSourceInternal( |
| 227 AudioDeviceFactory::NewInputDevice(render_frame_id_), | 204 AudioDeviceFactory::NewInputDevice(render_frame_id_), channel_layout, |
| 228 channel_layout, | 205 device_info_.device.input.sample_rate); |
| 229 device_info_.device.input.sample_rate, | |
| 230 buffer_size_samples); | |
| 231 | 206 |
| 232 // Add the capturer to the WebRtcAudioDeviceImpl since it needs some hardware | 207 // Add the capturer to the WebRtcAudioDeviceImpl since it needs some hardware |
| 233 // information from the capturer. | 208 // information from the capturer. |
| 234 if (audio_device_) | 209 if (audio_device_) |
| 235 audio_device_->AddAudioCapturer(this); | 210 audio_device_->AddAudioCapturer(this); |
| 236 | 211 |
| 237 return true; | 212 return true; |
| 238 } | 213 } |
| 239 | 214 |
| 240 WebRtcAudioCapturer::WebRtcAudioCapturer( | 215 WebRtcAudioCapturer::WebRtcAudioCapturer( |
| 241 int render_frame_id, | 216 int render_frame_id, |
| 242 const StreamDeviceInfo& device_info, | 217 const StreamDeviceInfo& device_info, |
| 243 const blink::WebMediaConstraints& constraints, | 218 const blink::WebMediaConstraints& constraints, |
| 244 WebRtcAudioDeviceImpl* audio_device, | 219 WebRtcAudioDeviceImpl* audio_device, |
| 245 MediaStreamAudioSource* audio_source) | 220 MediaStreamAudioSource* audio_source) |
| 246 : constraints_(constraints), | 221 : constraints_(constraints), |
| 247 audio_processor_(new rtc::RefCountedObject<MediaStreamAudioProcessor>( | 222 audio_processor_(new rtc::RefCountedObject<MediaStreamAudioProcessor>( |
| 248 constraints, | 223 constraints, |
| 249 device_info.device.input, | 224 device_info.device.input, |
| 250 audio_device)), | 225 audio_device)), |
| 251 running_(false), | 226 running_(false), |
| 252 render_frame_id_(render_frame_id), | 227 render_frame_id_(render_frame_id), |
| 253 device_info_(device_info), | 228 device_info_(device_info), |
| 254 volume_(0), | 229 volume_(0), |
| 255 peer_connection_mode_(false), | 230 peer_connection_mode_(false), |
| 256 audio_device_(audio_device), | 231 audio_device_(audio_device), |
| 257 audio_source_(audio_source) { | 232 audio_source_(audio_source), |
| | 233 weak_factory_(this) { |
| 258 DVLOG(1) << "WebRtcAudioCapturer::WebRtcAudioCapturer()"; | 234 DVLOG(1) << "WebRtcAudioCapturer::WebRtcAudioCapturer()"; |
| 259 } | 235 } |
| 260 | 236 |
| 261 WebRtcAudioCapturer::~WebRtcAudioCapturer() { | 237 WebRtcAudioCapturer::~WebRtcAudioCapturer() { |
| 262 DCHECK(thread_checker_.CalledOnValidThread()); | 238 DCHECK(thread_checker_.CalledOnValidThread()); |
| 263 DCHECK(tracks_.IsEmpty()); | 239 DCHECK(tracks_.IsEmpty()); |
| 264 DVLOG(1) << "WebRtcAudioCapturer::~WebRtcAudioCapturer()"; | 240 DVLOG(1) << "WebRtcAudioCapturer::~WebRtcAudioCapturer()"; |
| 265 Stop(); | 241 Stop(); |
| 266 } | 242 } |
| 267 | 243 |
| 268 void WebRtcAudioCapturer::AddTrack(WebRtcLocalAudioTrack* track) { | 244 void WebRtcAudioCapturer::AddTrack(WebRtcLocalAudioTrack* track) { |
| | 245 DCHECK(thread_checker_.CalledOnValidThread()); |
| 269 DCHECK(track); | 246 DCHECK(track); |
| 270 DVLOG(1) << "WebRtcAudioCapturer::AddTrack()"; | 247 DVLOG(1) << "WebRtcAudioCapturer::AddTrack()"; |
| 271 | 248 |
| | 249 track->AddStopObserver(base::Bind(&WebRtcAudioCapturer::RemoveTrack, |
| | 250 weak_factory_.GetWeakPtr(), track)); |
| | 251 track->SetLevel(level_calculator_.level()); |
| | 252 |
| 272 { | 253 { |
| 273 base::AutoLock auto_lock(lock_); | 254 base::AutoLock auto_lock(lock_); |
| 274 // Verify that |track| is not already added to the list. | 255 // Verify that |track| is not already added to the list. |
| 275 DCHECK(!tracks_.Contains(TrackOwner::TrackWrapper(track))); | 256 DCHECK(!tracks_.Contains(TrackOwner::TrackWrapper(track))); |
| 276 | 257 |
| 277 // Add with a tag, so we remember to call OnSetFormat() on the new | 258 // Add with a tag, so we remember to call OnSetFormat() on the new |
| 278 // track. | 259 // track. |
| 279 scoped_refptr<TrackOwner> track_owner(new TrackOwner(track)); | 260 scoped_refptr<TrackOwner> track_owner(new TrackOwner(track)); |
| 280 tracks_.AddAndTag(track_owner.get()); | 261 tracks_.AddAndTag(track_owner.get()); |
| 281 } | 262 } |
| (...skipping 23 matching lines...) | |
| 305 // we have to call StopSource on the MediaStreamSource. This will call | 286 // we have to call StopSource on the MediaStreamSource. This will call |
| 306 // MediaStreamAudioSource::DoStopSource which in turn call | 287 // MediaStreamAudioSource::DoStopSource which in turn call |
| 307 // WebRtcAudioCapturerer::Stop(); | 288 // WebRtcAudioCapturerer::Stop(); |
| 308 audio_source_->StopSource(); | 289 audio_source_->StopSource(); |
| 309 } | 290 } |
| 310 } | 291 } |
| 311 | 292 |
| 312 void WebRtcAudioCapturer::SetCapturerSourceInternal( | 293 void WebRtcAudioCapturer::SetCapturerSourceInternal( |
| 313 const scoped_refptr<media::AudioCapturerSource>& source, | 294 const scoped_refptr<media::AudioCapturerSource>& source, |
| 314 media::ChannelLayout channel_layout, | 295 media::ChannelLayout channel_layout, |
| 315 int sample_rate, | 296 int sample_rate) { |
| 316 int buffer_size) { | |
| 317 DCHECK(thread_checker_.CalledOnValidThread()); | 297 DCHECK(thread_checker_.CalledOnValidThread()); |
| 318 DVLOG(1) << "SetCapturerSource(channel_layout=" << channel_layout << "," | 298 DVLOG(1) << "SetCapturerSource(channel_layout=" << channel_layout << "," |
| 319 << "sample_rate=" << sample_rate << ")"; | 299 << "sample_rate=" << sample_rate << ")"; |
| 320 scoped_refptr<media::AudioCapturerSource> old_source; | 300 scoped_refptr<media::AudioCapturerSource> old_source; |
| 321 { | 301 { |
| 322 base::AutoLock auto_lock(lock_); | 302 base::AutoLock auto_lock(lock_); |
| 323 if (source_.get() == source.get()) | 303 if (source_.get() == source.get()) |
| 324 return; | 304 return; |
| 325 | 305 |
| 326 source_.swap(old_source); | 306 source_.swap(old_source); |
| 327 source_ = source; | 307 source_ = source; |
| 328 | 308 |
| 329 // Reset the flag to allow starting the new source. | 309 // Reset the flag to allow starting the new source. |
| 330 running_ = false; | 310 running_ = false; |
| 331 } | 311 } |
| 332 | 312 |
| 333 DVLOG(1) << "Switching to a new capture source."; | 313 DVLOG(1) << "Switching to a new capture source."; |
| 334 if (old_source.get()) | 314 if (old_source.get()) |
| 335 old_source->Stop(); | 315 old_source->Stop(); |
| 336 | 316 |
| 337 // If the buffer size is zero, it has not been specified. | |
| 338 // We either default to 10ms, or use the hardware buffer size. | |
| 339 if (buffer_size == 0) | |
| 340 buffer_size = GetBufferSize(sample_rate); | |
| 341 | |
| 342 // Dispatch the new parameters both to the sink(s) and to the new source, | 317 // Dispatch the new parameters both to the sink(s) and to the new source, |
| 343 // also apply the new |constraints|. | 318 // also apply the new |constraints|. |
| 344 // The idea is to get rid of any dependency of the microphone parameters | 319 // The idea is to get rid of any dependency of the microphone parameters |
| 345 // which would normally be used by default. | 320 // which would normally be used by default. |
| 346 // bits_per_sample is always 16 for now. | 321 // bits_per_sample is always 16 for now. |
| 347 media::AudioParameters params(media::AudioParameters::AUDIO_PCM_LOW_LATENCY, | 322 media::AudioParameters params(media::AudioParameters::AUDIO_PCM_LOW_LATENCY, |
| 348 channel_layout, sample_rate, 16, buffer_size); | 323 channel_layout, sample_rate, 16, |
| | 324 GetBufferSize(sample_rate)); |
| 349 params.set_effects(device_info_.device.input.effects); | 325 params.set_effects(device_info_.device.input.effects); |
| | 326 DCHECK(params.IsValid()); |
| 350 | 327 |
| 351 { | 328 { |
| 352 base::AutoLock auto_lock(lock_); | 329 base::AutoLock auto_lock(lock_); |
| 353 // Notify the |audio_processor_| of the new format. | 330 // Notify the |audio_processor_| of the new format. |
| 354 audio_processor_->OnCaptureFormatChanged(params); | 331 audio_processor_->OnCaptureFormatChanged(params); |
| 355 | 332 |
| 356 // Notify all tracks about the new format. | 333 // Notify all tracks about the new format. |
| 357 tracks_.TagAll(); | 334 tracks_.TagAll(); |
| 358 } | 335 } |
| 359 | 336 |
| 360 if (source.get()) | 337 if (source.get()) |
| 361 source->Initialize(params, this, session_id()); | 338 source->Initialize(params, this, device_info_.session_id); |
| 362 | 339 |
| 363 Start(); | 340 Start(); |
| 364 } | 341 } |
| 365 | 342 |
| 366 void WebRtcAudioCapturer::EnablePeerConnectionMode() { | 343 void WebRtcAudioCapturer::EnablePeerConnectionMode() { |
| 367 DCHECK(thread_checker_.CalledOnValidThread()); | 344 DCHECK(thread_checker_.CalledOnValidThread()); |
| 368 DVLOG(1) << "EnablePeerConnectionMode"; | 345 DVLOG(1) << "EnablePeerConnectionMode"; |
| 369 // Do nothing if the peer connection mode has been enabled. | 346 // Do nothing if the peer connection mode has been enabled. |
| 370 if (peer_connection_mode_) | 347 if (peer_connection_mode_) |
| 371 return; | 348 return; |
| (...skipping 15 matching lines...) | |
| 387 // Do nothing if the current buffer size is the WebRtc native buffer size. | 364 // Do nothing if the current buffer size is the WebRtc native buffer size. |
| 388 if (GetBufferSize(input_params.sample_rate()) == | 365 if (GetBufferSize(input_params.sample_rate()) == |
| 389 input_params.frames_per_buffer()) { | 366 input_params.frames_per_buffer()) { |
| 390 return; | 367 return; |
| 391 } | 368 } |
| 392 | 369 |
| 393 // Create a new audio stream as source which will open the hardware using | 370 // Create a new audio stream as source which will open the hardware using |
| 394 // WebRtc native buffer size. | 371 // WebRtc native buffer size. |
| 395 SetCapturerSourceInternal(AudioDeviceFactory::NewInputDevice(render_frame_id), | 372 SetCapturerSourceInternal(AudioDeviceFactory::NewInputDevice(render_frame_id), |
| 396 input_params.channel_layout(), | 373 input_params.channel_layout(), |
| 397 input_params.sample_rate(), | 374 input_params.sample_rate()); |
| 398 0); | |
| 399 } | 375 } |
| 400 | 376 |
| 401 void WebRtcAudioCapturer::Start() { | 377 void WebRtcAudioCapturer::Start() { |
| 402 DCHECK(thread_checker_.CalledOnValidThread()); | 378 DCHECK(thread_checker_.CalledOnValidThread()); |
| 403 DVLOG(1) << "WebRtcAudioCapturer::Start()"; | 379 DVLOG(1) << "WebRtcAudioCapturer::Start()"; |
| 404 base::AutoLock auto_lock(lock_); | 380 base::AutoLock auto_lock(lock_); |
| 405 if (running_ || !source_.get()) | 381 if (running_ || !source_.get()) |
| 406 return; | 382 return; |
| 407 | 383 |
| 408 // Start the data source, i.e., start capturing data from the current source. | 384 // Start the data source, i.e., start capturing data from the current source. |
| (...skipping 16 matching lines...) | |
| 425 source = source_; | 401 source = source_; |
| 426 tracks = tracks_.Items(); | 402 tracks = tracks_.Items(); |
| 427 tracks_.Clear(); | 403 tracks_.Clear(); |
| 428 running_ = false; | 404 running_ = false; |
| 429 } | 405 } |
| 430 | 406 |
| 431 // Remove the capturer object from the WebRtcAudioDeviceImpl. | 407 // Remove the capturer object from the WebRtcAudioDeviceImpl. |
| 432 if (audio_device_) | 408 if (audio_device_) |
| 433 audio_device_->RemoveAudioCapturer(this); | 409 audio_device_->RemoveAudioCapturer(this); |
| 434 | 410 |
| | 411 // Invalidate the weak pointers since we don't need the tracks to call our |
| | 412 // RemoveTrack() method when their Stop() method is called. |
| | 413 weak_factory_.InvalidateWeakPtrs(); |
| | 414 |
| 435 for (TrackList::ItemList::const_iterator it = tracks.begin(); | 415 for (TrackList::ItemList::const_iterator it = tracks.begin(); |
| 436 it != tracks.end(); | 416 it != tracks.end(); |
| 437 ++it) { | 417 ++it) { |
| 438 (*it)->Stop(); | 418 (*it)->Stop(); |
| 439 } | 419 } |
| 440 | 420 |
| 441 if (source.get()) | 421 if (source.get()) |
| 442 source->Stop(); | 422 source->Stop(); |
| 443 | 423 |
| 444 // Stop the audio processor to avoid feeding render data into the processor. | 424 // Stop the audio processor to avoid feeding render data into the processor. |
| (...skipping 71 matching lines...) | |
| 516 | 496 |
| 517 // Notify the tracks on when the format changes. This will do nothing if | 497 // Notify the tracks on when the format changes. This will do nothing if |
| 518 // |tracks_to_notify_format| is empty. | 498 // |tracks_to_notify_format| is empty. |
| 519 const media::AudioParameters& output_params = | 499 const media::AudioParameters& output_params = |
| 520 audio_processor_->OutputFormat(); | 500 audio_processor_->OutputFormat(); |
| 521 for (const auto& track : tracks_to_notify_format) { | 501 for (const auto& track : tracks_to_notify_format) { |
| 522 track->OnSetFormat(output_params); | 502 track->OnSetFormat(output_params); |
| 523 track->SetAudioProcessor(audio_processor_); | 503 track->SetAudioProcessor(audio_processor_); |
| 524 } | 504 } |
| 525 | 505 |
| 526 // Figure out if the pre-processed data has any energy or not, the | 506 // Figure out if the pre-processed data has any energy or not. This |
| 527 // information will be passed to the track to force the calculator | 507 // information will be passed to the level calculator to force it to report |
| 528 // to report energy in case the post-processed data is zeroed by the audio | 508 // energy in case the post-processed data is zeroed by the audio processing. |
| 529 // processing. | |
| 530 const bool force_report_nonzero_energy = HasDataEnergy(*audio_source); | 509 const bool force_report_nonzero_energy = HasDataEnergy(*audio_source); |
| 531 | 510 |
| 532 // Push the data to the processor for processing. | 511 // Push the data to the processor for processing. |
| 533 audio_processor_->PushCaptureData( | 512 audio_processor_->PushCaptureData( |
| 534 *audio_source, | 513 *audio_source, |
| 535 base::TimeDelta::FromMilliseconds(audio_delay_milliseconds)); | 514 base::TimeDelta::FromMilliseconds(audio_delay_milliseconds)); |
| 536 | 515 |
| 537 // Process and consume the data in the processor until there is not enough | 516 // Process and consume the data in the processor until there is not enough |
| 538 // data in the processor. | 517 // data in the processor. |
| 539 media::AudioBus* processed_data = nullptr; | 518 media::AudioBus* processed_data = nullptr; |
| 540 base::TimeDelta processed_data_audio_delay; | 519 base::TimeDelta processed_data_audio_delay; |
| 541 int new_volume = 0; | 520 int new_volume = 0; |
| 542 while (audio_processor_->ProcessAndConsumeData( | 521 while (audio_processor_->ProcessAndConsumeData( |
| 543 current_volume, key_pressed, | 522 current_volume, key_pressed, |
| 544 &processed_data, &processed_data_audio_delay, &new_volume)) { | 523 &processed_data, &processed_data_audio_delay, &new_volume)) { |
| 545 DCHECK(processed_data); | 524 DCHECK(processed_data); |
| | 525 |
| | 526 level_calculator_.Calculate(*processed_data, force_report_nonzero_energy); |
| | 527 |
| 546 const base::TimeTicks processed_data_capture_time = | 528 const base::TimeTicks processed_data_capture_time = |
| 547 reference_clock_snapshot - processed_data_audio_delay; | 529 reference_clock_snapshot - processed_data_audio_delay; |
| 548 for (const auto& track : tracks) { | 530 for (const auto& track : tracks) |
| 549 track->Capture(*processed_data, | 531 track->Capture(*processed_data, processed_data_capture_time); |
| 550 processed_data_capture_time, | |
| 551 force_report_nonzero_energy); | |
| 552 } | |
| 553 | 532 |
| 554 if (new_volume) { | 533 if (new_volume) { |
| 555 SetVolume(new_volume); | 534 SetVolume(new_volume); |
| 556 | 535 |
| 557 // Update the |current_volume| to avoid passing the old volume to AGC. | 536 // Update the |current_volume| to avoid passing the old volume to AGC. |
| 558 current_volume = new_volume; | 537 current_volume = new_volume; |
| 559 } | 538 } |
| 560 } | 539 } |
| 561 } | 540 } |
| 562 | 541 |
| 563 void WebRtcAudioCapturer::OnCaptureError(const std::string& message) { | 542 void WebRtcAudioCapturer::OnCaptureError(const std::string& message) { |
| 564 WebRtcLogMessage("WAC::OnCaptureError: " + message); | 543 WebRtcLogMessage("WAC::OnCaptureError: " + message); |
| 565 } | 544 } |
| 566 | 545 |
| 567 media::AudioParameters WebRtcAudioCapturer::source_audio_parameters() const { | 546 media::AudioParameters WebRtcAudioCapturer::GetInputFormat() const { |
| 568 base::AutoLock auto_lock(lock_); | 547 base::AutoLock auto_lock(lock_); |
| 569 return audio_processor_.get() ? audio_processor_->InputFormat() | 548 return audio_processor_->InputFormat(); |
| 570 : media::AudioParameters(); | |
| 571 } | |
| 572 | |
| 573 bool WebRtcAudioCapturer::GetPairedOutputParameters( | |
| 574 int* session_id, | |
| 575 int* output_sample_rate, | |
| 576 int* output_frames_per_buffer) const { | |
| 577 // Don't set output parameters unless all of them are valid. | |
| 578 if (device_info_.session_id <= 0 || | |
| 579 !device_info_.device.matched_output.sample_rate || | |
| 580 !device_info_.device.matched_output.frames_per_buffer) | |
| 581 return false; | |
| 582 | |
| 583 *session_id = device_info_.session_id; | |
| 584 *output_sample_rate = device_info_.device.matched_output.sample_rate; | |
| 585 *output_frames_per_buffer = | |
| 586 device_info_.device.matched_output.frames_per_buffer; | |
| 587 | |
| 588 return true; | |
| 589 } | 549 } |
| 590 | 550 |
| 591 int WebRtcAudioCapturer::GetBufferSize(int sample_rate) const { | 551 int WebRtcAudioCapturer::GetBufferSize(int sample_rate) const { |
| 592 DCHECK(thread_checker_.CalledOnValidThread()); | 552 DCHECK(thread_checker_.CalledOnValidThread()); |
| 593 #if defined(OS_ANDROID) | 553 #if defined(OS_ANDROID) |
| 594 // TODO(henrika): Tune and adjust buffer size on Android. | 554 // TODO(henrika): Tune and adjust buffer size on Android. |
| 595 return (2 * sample_rate / 100); | 555 return (2 * sample_rate / 100); |
| 596 #endif | 556 #endif |
| 597 | 557 |
| 598 // PeerConnection is running at a buffer size of 10ms data. A multiple of | 558 // PeerConnection is running at a buffer size of 10ms data. A multiple of |
| (...skipping 12 matching lines...) | |
| 611 return hardware_buffer_size; | 571 return hardware_buffer_size; |
| 612 } | 572 } |
| 613 | 573 |
| 614 return (sample_rate / 100); | 574 return (sample_rate / 100); |
| 615 } | 575 } |
| 616 | 576 |
| 617 void WebRtcAudioCapturer::SetCapturerSource( | 577 void WebRtcAudioCapturer::SetCapturerSource( |
| 618 const scoped_refptr<media::AudioCapturerSource>& source, | 578 const scoped_refptr<media::AudioCapturerSource>& source, |
| 619 media::AudioParameters params) { | 579 media::AudioParameters params) { |
| 620 // Create a new audio stream as source which uses the new source. | 580 // Create a new audio stream as source which uses the new source. |
| 621 SetCapturerSourceInternal(source, | 581 SetCapturerSourceInternal(source, params.channel_layout(), |
| 622 params.channel_layout(), | 582 params.sample_rate()); |
| 623 params.sample_rate(), | |
| 624 0); | |
| 625 } | 583 } |
| 626 | 584 |
| 627 } // namespace content | 585 } // namespace content |
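One behavioral detail worth calling out from the NEW side of this diff: AddTrack() now registers a stop observer bound through weak_factory_.GetWeakPtr(), and Stop() calls weak_factory_.InvalidateWeakPtrs() before stopping the remaining tracks itself, so a track's Stop() no longer calls back into RemoveTrack() on a capturer that is already tearing down. The standalone sketch below models that guard with std::weak_ptr instead of Chromium's base::WeakPtrFactory and base::Bind (which do not compile outside a Chromium checkout); the class and method names are hypothetical analogues, not the real API.

```cpp
// Standalone analogue of the weak-pointer guard used by the NEW code.
// Chromium's base::WeakPtrFactory makes a bound callback a silent no-op once
// InvalidateWeakPtrs() runs; std::weak_ptr is used here only to model that
// behavior outside a Chromium checkout. Names below are hypothetical.
#include <functional>
#include <iostream>
#include <memory>

class Capturer {
 public:
  Capturer() : liveness_(std::make_shared<bool>(true)) {}

  // Analogue of base::Bind(&WebRtcAudioCapturer::RemoveTrack,
  //                        weak_factory_.GetWeakPtr(), track).
  std::function<void()> MakeStopObserver(int track_id) {
    std::weak_ptr<bool> weak = liveness_;
    return [this, weak, track_id] {
      if (weak.lock())  // Becomes a no-op once the weak pointers are invalidated.
        RemoveTrack(track_id);
    };
  }

  // Analogue of weak_factory_.InvalidateWeakPtrs() in Stop(): the capturer is
  // about to stop every track itself, so their observers must not call back.
  void InvalidateWeakPtrs() { liveness_.reset(); }

  void RemoveTrack(int track_id) {
    std::cout << "RemoveTrack(" << track_id << ")\n";
  }

 private:
  std::shared_ptr<bool> liveness_;
};

int main() {
  Capturer capturer;
  std::function<void()> observer = capturer.MakeStopObserver(1);
  observer();                     // Prints RemoveTrack(1): capturer still live.
  capturer.InvalidateWeakPtrs();  // Stop() path: capturer tears tracks down itself.
  observer();                     // Silently does nothing, as intended.
  return 0;
}
```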