| OLD | NEW |
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "content/renderer/media/webrtc_audio_capturer.h" | 5 #include "content/renderer/media/webrtc_audio_capturer.h" |
| 6 | 6 |
| 7 #include "base/bind.h" | 7 #include "base/bind.h" |
| 8 #include "base/logging.h" | 8 #include "base/logging.h" |
| 9 #include "base/macros.h" | 9 #include "base/macros.h" |
| 10 #include "base/metrics/histogram.h" | 10 #include "base/metrics/histogram.h" |
| 11 #include "base/strings/string_util.h" | 11 #include "base/strings/string_util.h" |
| 12 #include "base/strings/stringprintf.h" | 12 #include "base/strings/stringprintf.h" |
| 13 #include "build/build_config.h" | 13 #include "build/build_config.h" |
| 14 #include "content/child/child_process.h" | 14 #include "content/child/child_process.h" |
| 15 #include "content/renderer/media/audio_device_factory.h" | 15 #include "content/renderer/media/audio_device_factory.h" |
| 16 #include "content/renderer/media/media_stream_audio_processor.h" | 16 #include "content/renderer/media/media_stream_audio_processor.h" |
| 17 #include "content/renderer/media/media_stream_audio_processor_options.h" | 17 #include "content/renderer/media/media_stream_audio_processor_options.h" |
| 18 #include "content/renderer/media/media_stream_audio_source.h" | 18 #include "content/renderer/media/media_stream_audio_source.h" |
| 19 #include "content/renderer/media/media_stream_constraints_util.h" | 19 #include "content/renderer/media/media_stream_constraints_util.h" |
| 20 #include "content/renderer/media/webrtc_audio_device_impl.h" | 20 #include "content/renderer/media/webrtc_audio_device_impl.h" |
| 21 #include "content/renderer/media/webrtc_local_audio_track.h" | 21 #include "content/renderer/media/webrtc_local_audio_track.h" |
| 22 #include "content/renderer/media/webrtc_logging.h" | 22 #include "content/renderer/media/webrtc_logging.h" |
| 23 #include "media/audio/sample_rates.h" | 23 #include "media/audio/sample_rates.h" |
| 24 | 24 |
| 25 namespace content { | 25 namespace content { |
| 26 | 26 |
| 27 namespace { | |
| 28 | |
| 29 // Audio buffer sizes are specified in milliseconds. | |
| 30 const char kAudioLatency[] = "latencyMs"; | |
| 31 const int kMinAudioLatencyMs = 0; | |
| 32 const int kMaxAudioLatencyMs = 10000; | |
| 33 | |
| 34 // Method to check if any of the data in |audio_source| has energy. | |
| 35 bool HasDataEnergy(const media::AudioBus& audio_source) { | |
| 36 for (int ch = 0; ch < audio_source.channels(); ++ch) { | |
| 37 const float* channel_ptr = audio_source.channel(ch); | |
| 38 for (int frame = 0; frame < audio_source.frames(); ++frame) { | |
| 39 if (channel_ptr[frame] != 0) | |
| 40 return true; | |
| 41 } | |
| 42 } | |
| 43 | |
| 44 // All the data is zero. | |
| 45 return false; | |
| 46 } | |
| 47 | |
| 48 } // namespace | |
| 49 | |
| 50 // Reference counted container of WebRtcLocalAudioTrack delegate. | 27 // Reference counted container of WebRtcLocalAudioTrack delegate. |
| 51 // TODO(xians): Switch to MediaStreamAudioSinkOwner. | 28 // TODO(xians): Switch to MediaStreamAudioSinkOwner. |
| 52 class WebRtcAudioCapturer::TrackOwner | 29 class WebRtcAudioCapturer::TrackOwner |
| 53 : public base::RefCountedThreadSafe<WebRtcAudioCapturer::TrackOwner> { | 30 : public base::RefCountedThreadSafe<WebRtcAudioCapturer::TrackOwner> { |
| 54 public: | 31 public: |
| 55 explicit TrackOwner(WebRtcLocalAudioTrack* track) | 32 explicit TrackOwner(WebRtcLocalAudioTrack* track) |
| 56 : delegate_(track) {} | 33 : delegate_(track) {} |
| 57 | 34 |
| 58 void Capture(const media::AudioBus& audio_bus, | 35 void Capture(const media::AudioBus& audio_bus, |
| 59 base::TimeTicks estimated_capture_time, | 36 base::TimeTicks estimated_capture_time) { |
| 60 bool force_report_nonzero_energy) { | |
| 61 base::AutoLock lock(lock_); | 37 base::AutoLock lock(lock_); |
| 62 if (delegate_) { | 38 if (delegate_) { |
| 63 delegate_->Capture(audio_bus, | 39 delegate_->Capture(audio_bus, estimated_capture_time); |
| 64 estimated_capture_time, | |
| 65 force_report_nonzero_energy); | |
| 66 } | 40 } |
| 67 } | 41 } |
| 68 | 42 |
| 69 void OnSetFormat(const media::AudioParameters& params) { | 43 void OnSetFormat(const media::AudioParameters& params) { |
| 70 base::AutoLock lock(lock_); | 44 base::AutoLock lock(lock_); |
| 71 if (delegate_) | 45 if (delegate_) |
| 72 delegate_->OnSetFormat(params); | 46 delegate_->OnSetFormat(params); |
| 73 } | 47 } |
| 74 | 48 |
| 75 void SetAudioProcessor( | |
| 76 const scoped_refptr<MediaStreamAudioProcessor>& processor) { | |
| 77 base::AutoLock lock(lock_); | |
| 78 if (delegate_) | |
| 79 delegate_->SetAudioProcessor(processor); | |
| 80 } | |
| 81 | |
| 82 void Reset() { | 49 void Reset() { |
| 83 base::AutoLock lock(lock_); | 50 base::AutoLock lock(lock_); |
| 84 delegate_ = NULL; | 51 delegate_ = NULL; |
| 85 } | 52 } |
| 86 | 53 |
| 87 void Stop() { | 54 void Stop() { |
| 88 base::AutoLock lock(lock_); | 55 base::AutoLock lock(lock_); |
| 89 DCHECK(delegate_); | 56 DCHECK(delegate_); |
| 90 | 57 |
| 91 // This can be reentrant so reset |delegate_| before calling out. | 58 // This can be reentrant so reset |delegate_| before calling out. |
| (...skipping 25 matching lines...) |
| 117 } | 84 } |
| 118 | 85 |
| 119 // Do NOT reference count the |delegate_| to avoid cyclic reference counting. | 86 // Do NOT reference count the |delegate_| to avoid cyclic reference counting. |
| 120 WebRtcLocalAudioTrack* delegate_; | 87 WebRtcLocalAudioTrack* delegate_; |
| 121 mutable base::Lock lock_; | 88 mutable base::Lock lock_; |
| 122 | 89 |
| 123 DISALLOW_COPY_AND_ASSIGN(TrackOwner); | 90 DISALLOW_COPY_AND_ASSIGN(TrackOwner); |
| 124 }; | 91 }; |
| 125 | 92 |
| 126 // static | 93 // static |
| 127 scoped_refptr<WebRtcAudioCapturer> WebRtcAudioCapturer::CreateCapturer( | 94 scoped_ptr<WebRtcAudioCapturer> WebRtcAudioCapturer::CreateCapturer( |
| 128 int render_frame_id, | 95 int render_frame_id, |
| 129 const StreamDeviceInfo& device_info, | 96 const StreamDeviceInfo& device_info, |
| 130 const blink::WebMediaConstraints& constraints, | 97 const blink::WebMediaConstraints& constraints, |
| 131 WebRtcAudioDeviceImpl* audio_device, | 98 WebRtcAudioDeviceImpl* audio_device, |
| 132 MediaStreamAudioSource* audio_source) { | 99 MediaStreamAudioSource* audio_source) { |
| 133 scoped_refptr<WebRtcAudioCapturer> capturer = new WebRtcAudioCapturer( | 100 scoped_ptr<WebRtcAudioCapturer> capturer(new WebRtcAudioCapturer( |
| 134 render_frame_id, device_info, constraints, audio_device, audio_source); | 101 render_frame_id, device_info, constraints, audio_device, audio_source)); |
| 135 if (capturer->Initialize()) | 102 if (capturer->Initialize()) |
| 136 return capturer; | 103 return capturer; |
| 137 | 104 |
| 138 return NULL; | 105 return NULL; |
| 139 } | 106 } |
| 140 | 107 |
| 141 bool WebRtcAudioCapturer::Initialize() { | 108 bool WebRtcAudioCapturer::Initialize() { |
| 142 DCHECK(thread_checker_.CalledOnValidThread()); | 109 DCHECK(thread_checker_.CalledOnValidThread()); |
| 143 DVLOG(1) << "WebRtcAudioCapturer::Initialize()"; | 110 DVLOG(1) << "WebRtcAudioCapturer::Initialize()"; |
| 144 WebRtcLogMessage(base::StringPrintf( | 111 WebRtcLogMessage(base::StringPrintf( |
| (...skipping 54 matching lines...) |
| 199 << device_info_.device.input.sample_rate; | 166 << device_info_.device.input.sample_rate; |
| 200 media::AudioSampleRate asr; | 167 media::AudioSampleRate asr; |
| 201 if (media::ToAudioSampleRate(device_info_.device.input.sample_rate, &asr)) { | 168 if (media::ToAudioSampleRate(device_info_.device.input.sample_rate, &asr)) { |
| 202 UMA_HISTOGRAM_ENUMERATION( | 169 UMA_HISTOGRAM_ENUMERATION( |
| 203 "WebRTC.AudioInputSampleRate", asr, media::kAudioSampleRateMax + 1); | 170 "WebRTC.AudioInputSampleRate", asr, media::kAudioSampleRateMax + 1); |
| 204 } else { | 171 } else { |
| 205 UMA_HISTOGRAM_COUNTS("WebRTC.AudioInputSampleRateUnexpected", | 172 UMA_HISTOGRAM_COUNTS("WebRTC.AudioInputSampleRateUnexpected", |
| 206 device_info_.device.input.sample_rate); | 173 device_info_.device.input.sample_rate); |
| 207 } | 174 } |
| 208 | 175 |
| 209 // Initialize the buffer size to zero, which means it wasn't specified. | |
| 210 // If it is out of range, we return it to zero. | |
| 211 int buffer_size_ms = 0; | |
| 212 int buffer_size_samples = 0; | |
| 213 GetConstraintValueAsInteger(constraints_, kAudioLatency, &buffer_size_ms); | |
| 214 if (buffer_size_ms < kMinAudioLatencyMs || | |
| 215 buffer_size_ms > kMaxAudioLatencyMs) { | |
| 216 DVLOG(1) << "Ignoring out of range buffer size " << buffer_size_ms; | |
| 217 } else { | |
| 218 buffer_size_samples = | |
| 219 device_info_.device.input.sample_rate * buffer_size_ms / 1000; | |
| 220 } | |
| 221 DVLOG_IF(1, buffer_size_samples > 0) | |
| 222 << "Custom audio buffer size: " << buffer_size_samples << " samples"; | |
| 223 | |
| 224 // Create and configure the default audio capturing source. | 176 // Create and configure the default audio capturing source. |
| 225 SetCapturerSourceInternal( | 177 SetCapturerSourceInternal( |
| 226 AudioDeviceFactory::NewInputDevice(render_frame_id_), | 178 AudioDeviceFactory::NewInputDevice(render_frame_id_), channel_layout, |
| 227 channel_layout, | 179 device_info_.device.input.sample_rate); |
| 228 device_info_.device.input.sample_rate, | |
| 229 buffer_size_samples); | |
| 230 | 180 |
| 231 // Add the capturer to the WebRtcAudioDeviceImpl since it needs some hardware | 181 // Add the capturer to the WebRtcAudioDeviceImpl since it needs some hardware |
| 232 // information from the capturer. | 182 // information from the capturer. |
| 233 if (audio_device_) | 183 if (audio_device_) |
| 234 audio_device_->AddAudioCapturer(this); | 184 audio_device_->AddAudioCapturer(this); |
| 235 | 185 |
| 236 return true; | 186 return true; |
| 237 } | 187 } |
| 238 | 188 |
| 239 WebRtcAudioCapturer::WebRtcAudioCapturer( | 189 WebRtcAudioCapturer::WebRtcAudioCapturer( |
| (...skipping 18 matching lines...) |
| 258 } | 208 } |
| 259 | 209 |
| 260 WebRtcAudioCapturer::~WebRtcAudioCapturer() { | 210 WebRtcAudioCapturer::~WebRtcAudioCapturer() { |
| 261 DCHECK(thread_checker_.CalledOnValidThread()); | 211 DCHECK(thread_checker_.CalledOnValidThread()); |
| 262 DCHECK(tracks_.IsEmpty()); | 212 DCHECK(tracks_.IsEmpty()); |
| 263 DVLOG(1) << "WebRtcAudioCapturer::~WebRtcAudioCapturer()"; | 213 DVLOG(1) << "WebRtcAudioCapturer::~WebRtcAudioCapturer()"; |
| 264 Stop(); | 214 Stop(); |
| 265 } | 215 } |
| 266 | 216 |
| 267 void WebRtcAudioCapturer::AddTrack(WebRtcLocalAudioTrack* track) { | 217 void WebRtcAudioCapturer::AddTrack(WebRtcLocalAudioTrack* track) { |
| 218 DCHECK(thread_checker_.CalledOnValidThread()); |
| 268 DCHECK(track); | 219 DCHECK(track); |
| 269 DVLOG(1) << "WebRtcAudioCapturer::AddTrack()"; | 220 DVLOG(1) << "WebRtcAudioCapturer::AddTrack()"; |
| 270 | 221 |
| 222 track->SetLevel(level_calculator_.level()); |
| 223 |
| 224 // The track only grabs stats from the audio processor. Stats are only |
| 225 // available if audio processing is turned on. Therefore, only provide the |
| 226 // track a reference if audio processing is turned on. |
| 227 if (audio_processor_->has_audio_processing()) |
| 228 track->SetAudioProcessor(audio_processor_); |
| 229 |
| 271 { | 230 { |
| 272 base::AutoLock auto_lock(lock_); | 231 base::AutoLock auto_lock(lock_); |
| 273 // Verify that |track| is not already added to the list. | 232 // Verify that |track| is not already added to the list. |
| 274 DCHECK(!tracks_.Contains(TrackOwner::TrackWrapper(track))); | 233 DCHECK(!tracks_.Contains(TrackOwner::TrackWrapper(track))); |
| 275 | 234 |
| 276 // Add with a tag, so we remember to call OnSetFormat() on the new | 235 // Add with a tag, so we remember to call OnSetFormat() on the new |
| 277 // track. | 236 // track. |
| 278 scoped_refptr<TrackOwner> track_owner(new TrackOwner(track)); | 237 scoped_refptr<TrackOwner> track_owner(new TrackOwner(track)); |
| 279 tracks_.AddAndTag(track_owner.get()); | 238 tracks_.AddAndTag(track_owner.get()); |
| 280 } | 239 } |
| (...skipping 23 matching lines...) |
| 304 // we have to call StopSource on the MediaStreamSource. This will call | 263 // we have to call StopSource on the MediaStreamSource. This will call |
| 305 // MediaStreamAudioSource::DoStopSource which in turn calls | 264 // MediaStreamAudioSource::DoStopSource which in turn calls |
| 306 // WebRtcAudioCapturer::Stop(); | 265 // WebRtcAudioCapturer::Stop(); |
| 307 audio_source_->StopSource(); | 266 audio_source_->StopSource(); |
| 308 } | 267 } |
| 309 } | 268 } |
| 310 | 269 |
| 311 void WebRtcAudioCapturer::SetCapturerSourceInternal( | 270 void WebRtcAudioCapturer::SetCapturerSourceInternal( |
| 312 const scoped_refptr<media::AudioCapturerSource>& source, | 271 const scoped_refptr<media::AudioCapturerSource>& source, |
| 313 media::ChannelLayout channel_layout, | 272 media::ChannelLayout channel_layout, |
| 314 int sample_rate, | 273 int sample_rate) { |
| 315 int buffer_size) { | |
| 316 DCHECK(thread_checker_.CalledOnValidThread()); | 274 DCHECK(thread_checker_.CalledOnValidThread()); |
| 317 DVLOG(1) << "SetCapturerSource(channel_layout=" << channel_layout << "," | 275 DVLOG(1) << "SetCapturerSource(channel_layout=" << channel_layout << "," |
| 318 << "sample_rate=" << sample_rate << ")"; | 276 << "sample_rate=" << sample_rate << ")"; |
| 319 scoped_refptr<media::AudioCapturerSource> old_source; | 277 scoped_refptr<media::AudioCapturerSource> old_source; |
| 320 { | 278 { |
| 321 base::AutoLock auto_lock(lock_); | 279 base::AutoLock auto_lock(lock_); |
| 322 if (source_.get() == source.get()) | 280 if (source_.get() == source.get()) |
| 323 return; | 281 return; |
| 324 | 282 |
| 325 source_.swap(old_source); | 283 source_.swap(old_source); |
| 326 source_ = source; | 284 source_ = source; |
| 327 | 285 |
| 328 // Reset the flag to allow starting the new source. | 286 // Reset the flag to allow starting the new source. |
| 329 running_ = false; | 287 running_ = false; |
| 330 } | 288 } |
| 331 | 289 |
| 332 DVLOG(1) << "Switching to a new capture source."; | 290 DVLOG(1) << "Switching to a new capture source."; |
| 333 if (old_source.get()) | 291 if (old_source.get()) |
| 334 old_source->Stop(); | 292 old_source->Stop(); |
| 335 | 293 |
| 336 // If the buffer size is zero, it has not been specified. | |
| 337 // We either default to 10ms, or use the hardware buffer size. | |
| 338 if (buffer_size == 0) | |
| 339 buffer_size = GetBufferSize(sample_rate); | |
| 340 | |
| 341 // Dispatch the new parameters both to the sink(s) and to the new source, | 294 // Dispatch the new parameters both to the sink(s) and to the new source, |
| 342 // also apply the new |constraints|. | 295 // also apply the new |constraints|. |
| 343 // The idea is to get rid of any dependency of the microphone parameters | 296 // The idea is to get rid of any dependency of the microphone parameters |
| 344 // which would normally be used by default. | 297 // which would normally be used by default. |
| 345 // bits_per_sample is always 16 for now. | 298 // bits_per_sample is always 16 for now. |
| 346 media::AudioParameters params(media::AudioParameters::AUDIO_PCM_LOW_LATENCY, | 299 media::AudioParameters params(media::AudioParameters::AUDIO_PCM_LOW_LATENCY, |
| 347 channel_layout, sample_rate, 16, buffer_size); | 300 channel_layout, sample_rate, 16, |
| 301 GetBufferSize(sample_rate)); |
| 348 params.set_effects(device_info_.device.input.effects); | 302 params.set_effects(device_info_.device.input.effects); |
| 303 DCHECK(params.IsValid()); |
| 349 | 304 |
| 350 { | 305 { |
| 351 base::AutoLock auto_lock(lock_); | 306 base::AutoLock auto_lock(lock_); |
| 352 // Notify the |audio_processor_| of the new format. | 307 |
| 308 // Notify the |audio_processor_| of the new format. We're doing this while |
| 309 // the lock is held only because the signaling thread might be calling |
| 310 // GetInputFormat(). Simultaneous reads from the audio thread are NOT the |
| 311 // concern here since the source is currently stopped (i.e., no audio |
| 312 // capture calls can be executing). |
| 353 audio_processor_->OnCaptureFormatChanged(params); | 313 audio_processor_->OnCaptureFormatChanged(params); |
| 354 | 314 |
| 355 // Notify all tracks about the new format. | 315 // Notify all tracks about the new format. |
| 356 tracks_.TagAll(); | 316 tracks_.TagAll(); |
| 357 } | 317 } |
| 358 | 318 |
| 359 if (source.get()) | 319 if (source.get()) |
| 360 source->Initialize(params, this, session_id()); | 320 source->Initialize(params, this, device_info_.session_id); |
| 361 | 321 |
| 362 Start(); | 322 Start(); |
| 363 } | 323 } |
| 364 | 324 |
| 365 void WebRtcAudioCapturer::EnablePeerConnectionMode() { | 325 void WebRtcAudioCapturer::EnablePeerConnectionMode() { |
| 366 DCHECK(thread_checker_.CalledOnValidThread()); | 326 DCHECK(thread_checker_.CalledOnValidThread()); |
| 367 DVLOG(1) << "EnablePeerConnectionMode"; | 327 DVLOG(1) << "EnablePeerConnectionMode"; |
| 368 // Do nothing if the peer connection mode has been enabled. | 328 // Do nothing if the peer connection mode has been enabled. |
| 369 if (peer_connection_mode_) | 329 if (peer_connection_mode_) |
| 370 return; | 330 return; |
| (...skipping 15 matching lines...) |
| 386 // Do nothing if the current buffer size is the WebRtc native buffer size. | 346 // Do nothing if the current buffer size is the WebRtc native buffer size. |
| 387 if (GetBufferSize(input_params.sample_rate()) == | 347 if (GetBufferSize(input_params.sample_rate()) == |
| 388 input_params.frames_per_buffer()) { | 348 input_params.frames_per_buffer()) { |
| 389 return; | 349 return; |
| 390 } | 350 } |
| 391 | 351 |
| 392 // Create a new audio stream as source which will open the hardware using | 352 // Create a new audio stream as source which will open the hardware using |
| 393 // WebRtc native buffer size. | 353 // WebRtc native buffer size. |
| 394 SetCapturerSourceInternal(AudioDeviceFactory::NewInputDevice(render_frame_id), | 354 SetCapturerSourceInternal(AudioDeviceFactory::NewInputDevice(render_frame_id), |
| 395 input_params.channel_layout(), | 355 input_params.channel_layout(), |
| 396 input_params.sample_rate(), | 356 input_params.sample_rate()); |
| 397 0); | |
| 398 } | 357 } |
| 399 | 358 |
| 400 void WebRtcAudioCapturer::Start() { | 359 void WebRtcAudioCapturer::Start() { |
| 401 DCHECK(thread_checker_.CalledOnValidThread()); | 360 DCHECK(thread_checker_.CalledOnValidThread()); |
| 402 DVLOG(1) << "WebRtcAudioCapturer::Start()"; | 361 DVLOG(1) << "WebRtcAudioCapturer::Start()"; |
| 403 base::AutoLock auto_lock(lock_); | 362 base::AutoLock auto_lock(lock_); |
| 404 if (running_ || !source_.get()) | 363 if (running_ || !source_.get()) |
| 405 return; | 364 return; |
| 406 | 365 |
| 407 // Start the data source, i.e., start capturing data from the current source. | 366 // Start the data source, i.e., start capturing data from the current source. |
| (...skipping 92 matching lines...) |
| 500 | 459 |
| 501 // Map internal volume range of [0.0, 1.0] into [0, 255] used by AGC. | 460 // Map internal volume range of [0.0, 1.0] into [0, 255] used by AGC. |
| 502 // The volume can be higher than 255 on Linux, and it will be cropped to | 461 // The volume can be higher than 255 on Linux, and it will be cropped to |
| 503 // 255 since AGC does not allow values out of range. | 462 // 255 since AGC does not allow values out of range. |
| 504 volume_ = static_cast<int>((volume * MaxVolume()) + 0.5); | 463 volume_ = static_cast<int>((volume * MaxVolume()) + 0.5); |
| 505 current_volume = volume_ > MaxVolume() ? MaxVolume() : volume_; | 464 current_volume = volume_ > MaxVolume() ? MaxVolume() : volume_; |
| 506 tracks = tracks_.Items(); | 465 tracks = tracks_.Items(); |
| 507 tracks_.RetrieveAndClearTags(&tracks_to_notify_format); | 466 tracks_.RetrieveAndClearTags(&tracks_to_notify_format); |
| 508 } | 467 } |
| 509 | 468 |
| 469 // Sanity-check the input audio format in debug builds. Then, notify the |
| 470 // tracks if the format has changed. |
| 471 // |
| 472 // Locking is not needed here to read the audio input/output parameters |
| 473 // because the audio processor format changes only occur while audio capture |
| 474 // is stopped. |
| 510 DCHECK(audio_processor_->InputFormat().IsValid()); | 475 DCHECK(audio_processor_->InputFormat().IsValid()); |
| 511 DCHECK_EQ(audio_source->channels(), | 476 DCHECK_EQ(audio_source->channels(), |
| 512 audio_processor_->InputFormat().channels()); | 477 audio_processor_->InputFormat().channels()); |
| 513 DCHECK_EQ(audio_source->frames(), | 478 DCHECK_EQ(audio_source->frames(), |
| 514 audio_processor_->InputFormat().frames_per_buffer()); | 479 audio_processor_->InputFormat().frames_per_buffer()); |
| 515 | 480 if (!tracks_to_notify_format.empty()) { |
| 516 // Notify the tracks on when the format changes. This will do nothing if | 481 const media::AudioParameters& output_params = |
| 517 // |tracks_to_notify_format| is empty. | 482 audio_processor_->OutputFormat(); |
| 518 const media::AudioParameters& output_params = | 483 for (const auto& track : tracks_to_notify_format) |
| 519 audio_processor_->OutputFormat(); | 484 track->OnSetFormat(output_params); |
| 520 for (const auto& track : tracks_to_notify_format) { | |
| 521 track->OnSetFormat(output_params); | |
| 522 track->SetAudioProcessor(audio_processor_); | |
| 523 } | 485 } |
| 524 | 486 |
| 525 // Figure out if the pre-processed data has any energy or not, the | 487 // Figure out if the pre-processed data has any energy or not. This |
| 526 // information will be passed to the track to force the calculator | 488 // information will be passed to the level calculator to force it to report |
| 527 // to report energy in case the post-processed data is zeroed by the audio | 489 // energy in case the post-processed data is zeroed by the audio processing. |
| 528 // processing. | 490 const bool force_report_nonzero_energy = !audio_source->AreFramesZero(); |
| 529 const bool force_report_nonzero_energy = HasDataEnergy(*audio_source); | |
| 530 | 491 |
| 531 // Push the data to the processor for processing. | 492 // Push the data to the processor for processing. |
| 532 audio_processor_->PushCaptureData( | 493 audio_processor_->PushCaptureData( |
| 533 *audio_source, | 494 *audio_source, |
| 534 base::TimeDelta::FromMilliseconds(audio_delay_milliseconds)); | 495 base::TimeDelta::FromMilliseconds(audio_delay_milliseconds)); |
| 535 | 496 |
| 536 // Process and consume the data in the processor until there is not enough | 497 // Process and consume the data in the processor until there is not enough |
| 537 // data in the processor. | 498 // data in the processor. |
| 538 media::AudioBus* processed_data = nullptr; | 499 media::AudioBus* processed_data = nullptr; |
| 539 base::TimeDelta processed_data_audio_delay; | 500 base::TimeDelta processed_data_audio_delay; |
| 540 int new_volume = 0; | 501 int new_volume = 0; |
| 541 while (audio_processor_->ProcessAndConsumeData( | 502 while (audio_processor_->ProcessAndConsumeData( |
| 542 current_volume, key_pressed, | 503 current_volume, key_pressed, |
| 543 &processed_data, &processed_data_audio_delay, &new_volume)) { | 504 &processed_data, &processed_data_audio_delay, &new_volume)) { |
| 544 DCHECK(processed_data); | 505 DCHECK(processed_data); |
| 506 |
| 507 level_calculator_.Calculate(*processed_data, force_report_nonzero_energy); |
| 508 |
| 545 const base::TimeTicks processed_data_capture_time = | 509 const base::TimeTicks processed_data_capture_time = |
| 546 reference_clock_snapshot - processed_data_audio_delay; | 510 reference_clock_snapshot - processed_data_audio_delay; |
| 547 for (const auto& track : tracks) { | 511 for (const auto& track : tracks) |
| 548 track->Capture(*processed_data, | 512 track->Capture(*processed_data, processed_data_capture_time); |
| 549 processed_data_capture_time, | |
| 550 force_report_nonzero_energy); | |
| 551 } | |
| 552 | 513 |
| 553 if (new_volume) { | 514 if (new_volume) { |
| 554 SetVolume(new_volume); | 515 SetVolume(new_volume); |
| 555 | 516 |
| 556 // Update the |current_volume| to avoid passing the old volume to AGC. | 517 // Update the |current_volume| to avoid passing the old volume to AGC. |
| 557 current_volume = new_volume; | 518 current_volume = new_volume; |
| 558 } | 519 } |
| 559 } | 520 } |
| 560 } | 521 } |
| 561 | 522 |
| 562 void WebRtcAudioCapturer::OnCaptureError(const std::string& message) { | 523 void WebRtcAudioCapturer::OnCaptureError(const std::string& message) { |
| 563 WebRtcLogMessage("WAC::OnCaptureError: " + message); | 524 WebRtcLogMessage("WAC::OnCaptureError: " + message); |
| 564 } | 525 } |
| 565 | 526 |
| 566 media::AudioParameters WebRtcAudioCapturer::source_audio_parameters() const { | 527 media::AudioParameters WebRtcAudioCapturer::GetInputFormat() const { |
| 567 base::AutoLock auto_lock(lock_); | 528 base::AutoLock auto_lock(lock_); |
| 568 return audio_processor_.get() ? audio_processor_->InputFormat() | 529 return audio_processor_->InputFormat(); |
| 569 : media::AudioParameters(); | |
| 570 } | |
| 571 | |
| 572 bool WebRtcAudioCapturer::GetPairedOutputParameters( | |
| 573 int* session_id, | |
| 574 int* output_sample_rate, | |
| 575 int* output_frames_per_buffer) const { | |
| 576 // Don't set output parameters unless all of them are valid. | |
| 577 if (device_info_.session_id <= 0 || | |
| 578 !device_info_.device.matched_output.sample_rate || | |
| 579 !device_info_.device.matched_output.frames_per_buffer) | |
| 580 return false; | |
| 581 | |
| 582 *session_id = device_info_.session_id; | |
| 583 *output_sample_rate = device_info_.device.matched_output.sample_rate; | |
| 584 *output_frames_per_buffer = | |
| 585 device_info_.device.matched_output.frames_per_buffer; | |
| 586 | |
| 587 return true; | |
| 588 } | 530 } |
| 589 | 531 |
| 590 int WebRtcAudioCapturer::GetBufferSize(int sample_rate) const { | 532 int WebRtcAudioCapturer::GetBufferSize(int sample_rate) const { |
| 591 DCHECK(thread_checker_.CalledOnValidThread()); | 533 DCHECK(thread_checker_.CalledOnValidThread()); |
| 592 #if defined(OS_ANDROID) | 534 #if defined(OS_ANDROID) |
| 593 // TODO(henrika): Tune and adjust buffer size on Android. | 535 // TODO(henrika): Tune and adjust buffer size on Android. |
| 594 return (2 * sample_rate / 100); | 536 return (2 * sample_rate / 100); |
| 595 #endif | 537 #endif |
| 596 | 538 |
| 597 // PeerConnection is running at a buffer size of 10ms data. A multiple of | 539 // PeerConnection is running at a buffer size of 10ms data. A multiple of |
| (...skipping 12 matching lines...) |
| 610 return hardware_buffer_size; | 552 return hardware_buffer_size; |
| 611 } | 553 } |
| 612 | 554 |
| 613 return (sample_rate / 100); | 555 return (sample_rate / 100); |
| 614 } | 556 } |
| 615 | 557 |
| 616 void WebRtcAudioCapturer::SetCapturerSource( | 558 void WebRtcAudioCapturer::SetCapturerSource( |
| 617 const scoped_refptr<media::AudioCapturerSource>& source, | 559 const scoped_refptr<media::AudioCapturerSource>& source, |
| 618 media::AudioParameters params) { | 560 media::AudioParameters params) { |
| 619 // Create a new audio stream as source which uses the new source. | 561 // Create a new audio stream as source which uses the new source. |
| 620 SetCapturerSourceInternal(source, | 562 SetCapturerSourceInternal(source, params.channel_layout(), |
| 621 params.channel_layout(), | 563 params.sample_rate()); |
| 622 params.sample_rate(), | |
| 623 0); | |
| 624 } | 564 } |
| 625 | 565 |
| 626 } // namespace content | 566 } // namespace content |