Index: content/renderer/media/webrtc/processed_local_audio_source.cc
diff --git a/content/renderer/media/webrtc_audio_capturer.cc b/content/renderer/media/webrtc/processed_local_audio_source.cc
similarity index 28%
rename from content/renderer/media/webrtc_audio_capturer.cc
rename to content/renderer/media/webrtc/processed_local_audio_source.cc
index de076b6ec55140006139f4445be1fffdd002c2a0..863542761ea6201bc222e050f5ae991c90b8003b 100644
--- a/content/renderer/media/webrtc_audio_capturer.cc
+++ b/content/renderer/media/webrtc/processed_local_audio_source.cc
@@ -2,141 +2,169 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
-#include "content/renderer/media/webrtc_audio_capturer.h"
+#include "content/renderer/media/webrtc/processed_local_audio_source.h"
-#include "base/bind.h"
 #include "base/logging.h"
-#include "base/macros.h"
 #include "base/metrics/histogram.h"
-#include "base/strings/string_util.h"
 #include "base/strings/stringprintf.h"
-#include "build/build_config.h"
-#include "content/child/child_process.h"
 #include "content/renderer/media/audio_device_factory.h"
-#include "content/renderer/media/media_stream_audio_processor.h"
 #include "content/renderer/media/media_stream_audio_processor_options.h"
-#include "content/renderer/media/media_stream_audio_source.h"
 #include "content/renderer/media/media_stream_constraints_util.h"
+#include "content/renderer/media/webrtc/peer_connection_dependency_factory.h"
 #include "content/renderer/media/webrtc_audio_device_impl.h"
-#include "content/renderer/media/webrtc_local_audio_track.h"
 #include "content/renderer/media/webrtc_logging.h"
+#include "content/renderer/render_frame_impl.h"
 #include "media/audio/sample_rates.h"
+#include "media/base/channel_layout.h"
+#include "third_party/webrtc/api/mediaconstraintsinterface.h"
+#include "third_party/webrtc/media/base/mediachannel.h"
 namespace content {
-// Reference counted container of WebRtcLocalAudioTrack delegate.
-// TODO(xians): Switch to MediaStreamAudioSinkOwner.
-class WebRtcAudioCapturer::TrackOwner
-    : public base::RefCountedThreadSafe<WebRtcAudioCapturer::TrackOwner> {
- public:
-  explicit TrackOwner(WebRtcLocalAudioTrack* track)
-      : delegate_(track) {}
-
-  void Capture(const media::AudioBus& audio_bus,
-               base::TimeTicks estimated_capture_time) {
-    base::AutoLock lock(lock_);
-    if (delegate_) {
-      delegate_->Capture(audio_bus, estimated_capture_time);
-    }
-  }
+namespace {
+// Used as an identifier for ProcessedLocalAudioSource::From().
+void* const kClassIdentifier = const_cast<void**>(&kClassIdentifier);
+}  // namespace
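+// The address of kClassIdentifier is unique within the process, so comparing
+// the pointer returned by GetClassIdentifier() against it is a cheap,
+// RTTI-free way to recognize ProcessedLocalAudioSource instances (see From()
+// below).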
-  void OnSetFormat(const media::AudioParameters& params) {
-    base::AutoLock lock(lock_);
-    if (delegate_)
-      delegate_->OnSetFormat(params);
-  }
+ProcessedLocalAudioSource::ProcessedLocalAudioSource(
+    int consumer_render_frame_id,
+    const StreamDeviceInfo& device_info,
+    PeerConnectionDependencyFactory* factory)
+    : MediaStreamAudioSource(true /* is_local_source */),
+      consumer_render_frame_id_(consumer_render_frame_id),
+      pc_factory_(factory),
+      volume_(0),
+      allow_invalid_render_frame_id_for_testing_(false) {
+  DCHECK(pc_factory_);
+  DVLOG(1) << "ProcessedLocalAudioSource::ProcessedLocalAudioSource()";
+  MediaStreamSource::SetDeviceInfo(device_info);
+}
-  void Reset() {
-    base::AutoLock lock(lock_);
-    delegate_ = NULL;
-  }
+ProcessedLocalAudioSource::~ProcessedLocalAudioSource() {
+  DVLOG(1) << "ProcessedLocalAudioSource::~ProcessedLocalAudioSource()";
+  EnsureSourceIsStopped();
+}
-  void Stop() {
-    base::AutoLock lock(lock_);
-    DCHECK(delegate_);
+// static
+ProcessedLocalAudioSource* ProcessedLocalAudioSource::From(
+    MediaStreamAudioSource* source) {
+  if (source && source->GetClassIdentifier() == kClassIdentifier)
+    return static_cast<ProcessedLocalAudioSource*>(source);
+  return nullptr;
+}
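+// Illustrative use of From() (the variable names are hypothetical):
+//   if (ProcessedLocalAudioSource* pla_source =
+//           ProcessedLocalAudioSource::From(some_media_stream_audio_source)) {
+//     pla_source->SetVolume(new_volume);
+//   }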
-    // This can be reentrant so reset |delegate_| before calling out.
-    WebRtcLocalAudioTrack* temp = delegate_;
-    delegate_ = NULL;
-    temp->Stop();
-  }
+void ProcessedLocalAudioSource::SetSourceConstraints(
+    const blink::WebMediaConstraints& constraints) {
+  DCHECK(thread_checker_.CalledOnValidThread());
+  DCHECK(!constraints.isNull());
+  DCHECK(!source_);
+  constraints_ = constraints;
+}
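+// Note: The DCHECK(!source_) above means the constraints can only be set
+// before EnsureSourceIsStarted() has created the capturer source; they are
+// effectively immutable once capture is running.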
-  // Wrapper which allows to use std::find_if() when adding and removing
-  // sinks to/from the list.
-  struct TrackWrapper {
-    explicit TrackWrapper(WebRtcLocalAudioTrack* track) : track_(track) {}
-    bool operator()(
-        const scoped_refptr<WebRtcAudioCapturer::TrackOwner>& owner) const {
-      return owner->IsEqual(track_);
-    }
-    WebRtcLocalAudioTrack* track_;
-  };
+void* ProcessedLocalAudioSource::GetClassIdentifier() const {
+  return kClassIdentifier;
+}
- protected:
-  virtual ~TrackOwner() {}
+bool ProcessedLocalAudioSource::EnsureSourceIsStarted() {
+  DCHECK(thread_checker_.CalledOnValidThread());
- private:
-  friend class base::RefCountedThreadSafe<WebRtcAudioCapturer::TrackOwner>;
+  if (source_)
+    return true;
-  bool IsEqual(const WebRtcLocalAudioTrack* other) const {
-    base::AutoLock lock(lock_);
-    return (other == delegate_);
+  // Sanity-check that the consuming RenderFrame still exists. This is required
+  // to initialize the audio source.
+  if (!allow_invalid_render_frame_id_for_testing_ &&
+      !RenderFrameImpl::FromRoutingID(consumer_render_frame_id_)) {
+    WebRtcLogMessage("ProcessedLocalAudioSource::EnsureSourceIsStarted() fails "
+                     "because the render frame does not exist.");
+    return false;
   }
-  // Do NOT reference count the |delegate_| to avoid cyclic reference counting.
-  WebRtcLocalAudioTrack* delegate_;
-  mutable base::Lock lock_;
-
-  DISALLOW_COPY_AND_ASSIGN(TrackOwner);
-};
-
-// static
-std::unique_ptr<WebRtcAudioCapturer> WebRtcAudioCapturer::CreateCapturer(
-    int render_frame_id,
-    const StreamDeviceInfo& device_info,
-    const blink::WebMediaConstraints& constraints,
-    WebRtcAudioDeviceImpl* audio_device,
-    MediaStreamAudioSource* audio_source) {
-  std::unique_ptr<WebRtcAudioCapturer> capturer(new WebRtcAudioCapturer(
-      render_frame_id, device_info, constraints, audio_device, audio_source));
-  if (capturer->Initialize())
-    return capturer;
-
-  return NULL;
-}
-
-bool WebRtcAudioCapturer::Initialize() {
-  DCHECK(thread_checker_.CalledOnValidThread());
-  DVLOG(1) << "WebRtcAudioCapturer::Initialize()";
   WebRtcLogMessage(base::StringPrintf(
-      "WAC::Initialize. render_frame_id=%d"
+      "ProcessedLocalAudioSource::EnsureSourceIsStarted. render_frame_id=%d"
       ", channel_layout=%d, sample_rate=%d, buffer_size=%d"
       ", session_id=%d, paired_output_sample_rate=%d"
       ", paired_output_frames_per_buffer=%d, effects=%d. ",
-      render_frame_id_, device_info_.device.input.channel_layout,
-      device_info_.device.input.sample_rate,
-      device_info_.device.input.frames_per_buffer, device_info_.session_id,
-      device_info_.device.matched_output.sample_rate,
-      device_info_.device.matched_output.frames_per_buffer,
-      device_info_.device.input.effects));
-
-  if (render_frame_id_ == -1) {
-    // Return true here to allow injecting a new source via
-    // SetCapturerSourceForTesting() at a later state.
-    return true;
+      consumer_render_frame_id_, device_info().device.input.channel_layout,
+      device_info().device.input.sample_rate,
+      device_info().device.input.frames_per_buffer, device_info().session_id,
+      device_info().device.matched_output.sample_rate,
+      device_info().device.matched_output.frames_per_buffer,
+      device_info().device.input.effects));
+
+  // Sanity-check that the constraints, plus the additional input effects, are
+  // valid when combined.
+  const MediaAudioConstraints audio_constraints(
+      constraints_, device_info().device.input.effects);
+  if (!audio_constraints.IsValid()) {
+    WebRtcLogMessage("ProcessedLocalAudioSource::EnsureSourceIsStarted() fails "
+                     "because MediaAudioConstraints are not valid.");
+    return false;
   }
-  MediaAudioConstraints audio_constraints(constraints_,
-                                          device_info_.device.input.effects);
-  if (!audio_constraints.IsValid())
+  // Build an AudioOptions by applying relevant constraints to it, and then use
+  // it to create a webrtc::AudioSourceInterface instance.
+  cricket::AudioOptions rtc_options;
+  rtc_options.echo_cancellation = ConstraintToOptional(
+      constraints_, &blink::WebMediaTrackConstraintSet::echoCancellation);
+  rtc_options.delay_agnostic_aec = ConstraintToOptional(
+      constraints_, &blink::WebMediaTrackConstraintSet::googDAEchoCancellation);
+  rtc_options.auto_gain_control = ConstraintToOptional(
+      constraints_, &blink::WebMediaTrackConstraintSet::googAutoGainControl);
+  rtc_options.experimental_agc = ConstraintToOptional(
+      constraints_,
+      &blink::WebMediaTrackConstraintSet::googExperimentalAutoGainControl);
+  rtc_options.noise_suppression = ConstraintToOptional(
+      constraints_, &blink::WebMediaTrackConstraintSet::googNoiseSuppression);
+  rtc_options.experimental_ns = ConstraintToOptional(
+      constraints_,
+      &blink::WebMediaTrackConstraintSet::googExperimentalNoiseSuppression);
+  rtc_options.highpass_filter = ConstraintToOptional(
+      constraints_, &blink::WebMediaTrackConstraintSet::googHighpassFilter);
+  rtc_options.typing_detection = ConstraintToOptional(
+      constraints_,
+      &blink::WebMediaTrackConstraintSet::googTypingNoiseDetection);
+  rtc_options.stereo_swapping = ConstraintToOptional(
+      constraints_, &blink::WebMediaTrackConstraintSet::googAudioMirroring);
+  MediaAudioConstraints::ApplyFixedAudioConstraints(&rtc_options);
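+  // ConstraintToOptional() returns an unset rtc::Optional<bool> when the
+  // constraint has no exact value, so options left unconstrained fall through
+  // to the WebRTC defaults instead of being forced on or off.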
+  if (device_info().device.input.effects &
+      media::AudioParameters::ECHO_CANCELLER) {
+    // TODO(hta): Figure out if we should be looking at echoCancellation.
+    // Previous code had googEchoCancellation only.
+    const blink::BooleanConstraint& echoCancellation =
+        constraints_.basic().googEchoCancellation;
+    if (echoCancellation.hasExact() && !echoCancellation.exact()) {
+      StreamDeviceInfo modified_device_info(device_info());
+      modified_device_info.device.input.effects &=
+          ~media::AudioParameters::ECHO_CANCELLER;
+      SetDeviceInfo(modified_device_info);
+    }
+    rtc_options.echo_cancellation = rtc::Optional<bool>(false);
+  }
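+  // When the hardware provides echo cancellation (the ECHO_CANCELLER effect
+  // handled above), software AEC is explicitly disabled in |rtc_options| so
+  // the signal is not echo-cancelled twice.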
+  rtc_source_ = pc_factory_->CreateLocalAudioSource(rtc_options);
+  if (rtc_source_->state() != webrtc::MediaSourceInterface::kLive) {
+    WebRtcLogMessage("ProcessedLocalAudioSource::EnsureSourceIsStarted() fails "
+                     "because the rtc LocalAudioSource is not live.");
     return false;
+  }
-  media::ChannelLayout channel_layout = static_cast<media::ChannelLayout>(
-      device_info_.device.input.channel_layout);
+  // Create the MediaStreamAudioProcessor, bound to the WebRTC audio device
+  // module.
+  WebRtcAudioDeviceImpl* const rtc_audio_device =
+      pc_factory_->GetWebRtcAudioDevice();
+  if (!rtc_audio_device) {
+    WebRtcLogMessage("ProcessedLocalAudioSource::EnsureSourceIsStarted() fails "
+                     "because there is no WebRtcAudioDeviceImpl instance.");
+    return false;
+  }
+  audio_processor_ = new rtc::RefCountedObject<MediaStreamAudioProcessor>(
+      constraints_, device_info().device.input, rtc_audio_device);
   // If KEYBOARD_MIC effect is set, change the layout to the corresponding
   // layout that includes the keyboard mic.
-  if ((device_info_.device.input.effects &
+  media::ChannelLayout channel_layout = static_cast<media::ChannelLayout>(
+      device_info().device.input.channel_layout);
+  if ((device_info().device.input.effects &
        media::AudioParameters::KEYBOARD_MIC) &&
      audio_constraints.GetGoogExperimentalNoiseSuppression()) {
     if (channel_layout == media::CHANNEL_LAYOUT_STEREO) {
@@ -157,282 +185,102 @@ bool WebRtcAudioCapturer::Initialize() {
   if (channel_layout != media::CHANNEL_LAYOUT_MONO &&
       channel_layout != media::CHANNEL_LAYOUT_STEREO &&
       channel_layout != media::CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC) {
-    DLOG(ERROR) << channel_layout
-                << " is not a supported input channel configuration.";
+    WebRtcLogMessage(base::StringPrintf(
+        "ProcessedLocalAudioSource::EnsureSourceIsStarted() fails "
+        "because the input channel layout (%d) is not supported.",
+        static_cast<int>(channel_layout)));
     return false;
   }
DVLOG(1) << "Audio input hardware sample rate: " |
- << device_info_.device.input.sample_rate; |
+ << device_info().device.input.sample_rate; |
media::AudioSampleRate asr; |
- if (media::ToAudioSampleRate(device_info_.device.input.sample_rate, &asr)) { |
+ if (media::ToAudioSampleRate(device_info().device.input.sample_rate, &asr)) { |
UMA_HISTOGRAM_ENUMERATION( |
"WebRTC.AudioInputSampleRate", asr, media::kAudioSampleRateMax + 1); |
} else { |
UMA_HISTOGRAM_COUNTS("WebRTC.AudioInputSampleRateUnexpected", |
- device_info_.device.input.sample_rate); |
+ device_info().device.input.sample_rate); |
} |
- // Create and configure the default audio capturing source. |
- SetCapturerSourceInternal( |
- AudioDeviceFactory::NewAudioCapturerSource(render_frame_id_), |
- channel_layout, device_info_.device.input.sample_rate); |
+  // Determine the audio format required of the AudioCapturerSource. Then, pass
+  // that to the |audio_processor_| and set the output format of this
+  // ProcessedLocalAudioSource to the processor's output format.
+  media::AudioParameters params(
+      media::AudioParameters::AUDIO_PCM_LOW_LATENCY, channel_layout,
+      device_info().device.input.sample_rate, 16,
+      GetBufferSize(device_info().device.input.sample_rate));
+  params.set_effects(device_info().device.input.effects);
+  DCHECK(params.IsValid());
+  audio_processor_->OnCaptureFormatChanged(params);
+  MediaStreamAudioSource::SetFormat(audio_processor_->OutputFormat());
+
+  // Start the source.
+  VLOG(1) << "Starting WebRTC audio source for consumption by render frame "
+          << consumer_render_frame_id_ << " with input parameters={"
+          << params.AsHumanReadableString() << "} and output parameters={"
+          << GetAudioParameters().AsHumanReadableString() << '}';
+  source_ =
+      AudioDeviceFactory::NewAudioCapturerSource(consumer_render_frame_id_);
+  source_->Initialize(params, this, device_info().session_id);
+  // We need to set the AGC control before starting the stream.
+  source_->SetAutomaticGainControl(true);
+  source_->Start();
-  // Add the capturer to the WebRtcAudioDeviceImpl since it needs some hardware
-  // information from the capturer.
-  if (audio_device_)
-    audio_device_->AddAudioCapturer(this);
+  // Register this source with the WebRtcAudioDeviceImpl.
+  rtc_audio_device->AddAudioCapturer(this);
   return true;
 }
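+// Illustrative call sequence (the variable names are hypothetical, not part
+// of this file):
+//   ProcessedLocalAudioSource* const source = new ProcessedLocalAudioSource(
+//       render_frame_id, device_info, dependency_factory);
+//   source->SetSourceConstraints(constraints);
+//   if (!source->EnsureSourceIsStarted())
+//     ...  // See the WebRtcLogMessage() calls above for failure reasons.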
-WebRtcAudioCapturer::WebRtcAudioCapturer(
-    int render_frame_id,
-    const StreamDeviceInfo& device_info,
-    const blink::WebMediaConstraints& constraints,
-    WebRtcAudioDeviceImpl* audio_device,
-    MediaStreamAudioSource* audio_source)
-    : constraints_(constraints),
-      audio_processor_(new rtc::RefCountedObject<MediaStreamAudioProcessor>(
-          constraints,
-          device_info.device.input,
-          audio_device)),
-      running_(false),
-      render_frame_id_(render_frame_id),
-      device_info_(device_info),
-      volume_(0),
-      peer_connection_mode_(false),
-      audio_device_(audio_device),
-      audio_source_(audio_source) {
-  DVLOG(1) << "WebRtcAudioCapturer::WebRtcAudioCapturer()";
-}
-
-WebRtcAudioCapturer::~WebRtcAudioCapturer() {
-  DCHECK(thread_checker_.CalledOnValidThread());
-  DCHECK(tracks_.IsEmpty());
-  DVLOG(1) << "WebRtcAudioCapturer::~WebRtcAudioCapturer()";
-  Stop();
-}
-
-void WebRtcAudioCapturer::AddTrack(WebRtcLocalAudioTrack* track) {
-  DCHECK(thread_checker_.CalledOnValidThread());
-  DCHECK(track);
-  DVLOG(1) << "WebRtcAudioCapturer::AddTrack()";
-
-  track->SetLevel(level_calculator_.level());
-
-  // The track only grabs stats from the audio processor. Stats are only
-  // available if audio processing is turned on. Therefore, only provide the
-  // track a reference if audio processing is turned on.
-  if (audio_processor_->has_audio_processing())
-    track->SetAudioProcessor(audio_processor_);
-
-  {
-    base::AutoLock auto_lock(lock_);
-    // Verify that |track| is not already added to the list.
-    DCHECK(!tracks_.Contains(TrackOwner::TrackWrapper(track)));
-
-    // Add with a tag, so we remember to call OnSetFormat() on the new
-    // track.
-    scoped_refptr<TrackOwner> track_owner(new TrackOwner(track));
-    tracks_.AddAndTag(track_owner.get());
-  }
-}
-
-void WebRtcAudioCapturer::RemoveTrack(WebRtcLocalAudioTrack* track) {
+void ProcessedLocalAudioSource::EnsureSourceIsStopped() {
   DCHECK(thread_checker_.CalledOnValidThread());
-  DVLOG(1) << "WebRtcAudioCapturer::RemoveTrack()";
-  bool stop_source = false;
-  {
-    base::AutoLock auto_lock(lock_);
-
-    scoped_refptr<TrackOwner> removed_item =
-        tracks_.Remove(TrackOwner::TrackWrapper(track));
-
-    // Clear the delegate to ensure that no more capture callbacks will
-    // be sent to this sink. Also avoids a possible crash which can happen
-    // if this method is called while capturing is active.
-    if (removed_item.get()) {
-      removed_item->Reset();
-      stop_source = tracks_.IsEmpty();
-    }
-  }
-  if (stop_source) {
-    // Since WebRtcAudioCapturer does not inherit MediaStreamAudioSource,
-    // and instead MediaStreamAudioSource is composed of a WebRtcAudioCapturer,
-    // we have to call StopSource on the MediaStreamSource. This will call
-    // MediaStreamAudioSource::DoStopSource which in turn call
-    // WebRtcAudioCapturerer::Stop();
-    audio_source_->StopSource();
-  }
-}
-void WebRtcAudioCapturer::SetCapturerSourceInternal(
-    const scoped_refptr<media::AudioCapturerSource>& source,
-    media::ChannelLayout channel_layout,
-    int sample_rate) {
-  DCHECK(thread_checker_.CalledOnValidThread());
-  DVLOG(1) << "SetCapturerSource(channel_layout=" << channel_layout << ","
-           << "sample_rate=" << sample_rate << ")";
-  scoped_refptr<media::AudioCapturerSource> old_source;
-  {
-    base::AutoLock auto_lock(lock_);
-    if (source_.get() == source.get())
-      return;
-
-    source_.swap(old_source);
-    source_ = source;
-
-    // Reset the flag to allow starting the new source.
-    running_ = false;
-  }
-
-  DVLOG(1) << "Switching to a new capture source.";
-  if (old_source.get())
-    old_source->Stop();
-
-  // Dispatch the new parameters both to the sink(s) and to the new source,
-  // also apply the new |constraints|.
-  // The idea is to get rid of any dependency of the microphone parameters
-  // which would normally be used by default.
-  // bits_per_sample is always 16 for now.
-  media::AudioParameters params(media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
-                                channel_layout, sample_rate, 16,
-                                GetBufferSize(sample_rate));
-  params.set_effects(device_info_.device.input.effects);
-  DCHECK(params.IsValid());
-
-  {
-    base::AutoLock auto_lock(lock_);
-
-    // Notify the |audio_processor_| of the new format. We're doing this while
-    // the lock is held only because the signaling thread might be calling
-    // GetInputFormat(). Simultaneous reads from the audio thread are NOT the
-    // concern here since the source is currently stopped (i.e., no audio
-    // capture calls can be executing).
-    audio_processor_->OnCaptureFormatChanged(params);
-
-    // Notify all tracks about the new format.
-    tracks_.TagAll();
-  }
-
-  if (source.get())
-    source->Initialize(params, this, device_info_.session_id);
-
-  Start();
-}
-
-void WebRtcAudioCapturer::EnablePeerConnectionMode() {
-  DCHECK(thread_checker_.CalledOnValidThread());
-  DVLOG(1) << "EnablePeerConnectionMode";
-  // Do nothing if the peer connection mode has been enabled.
-  if (peer_connection_mode_)
+  if (!source_)
     return;
-  peer_connection_mode_ = true;
-  int render_frame_id = -1;
-  media::AudioParameters input_params;
-  {
-    base::AutoLock auto_lock(lock_);
-    // Simply return if there is no existing source or the |render_frame_id_| is
-    // not valid.
-    if (!source_.get() || render_frame_id_ == -1)
-      return;
-
-    render_frame_id = render_frame_id_;
-    input_params = audio_processor_->InputFormat();
-  }
-
-  // Do nothing if the current buffer size is the WebRtc native buffer size.
-  if (GetBufferSize(input_params.sample_rate()) ==
-      input_params.frames_per_buffer()) {
-    return;
+  if (WebRtcAudioDeviceImpl* rtc_audio_device =
+          pc_factory_->GetWebRtcAudioDevice()) {
+    rtc_audio_device->RemoveAudioCapturer(this);
   }
-  // Create a new audio stream as source which will open the hardware using
-  // WebRtc native buffer size.
-  SetCapturerSourceInternal(
-      AudioDeviceFactory::NewAudioCapturerSource(render_frame_id),
-      input_params.channel_layout(), input_params.sample_rate());
-}
-
-void WebRtcAudioCapturer::Start() {
-  DCHECK(thread_checker_.CalledOnValidThread());
-  DVLOG(1) << "WebRtcAudioCapturer::Start()";
-  base::AutoLock auto_lock(lock_);
-  if (running_ || !source_.get())
-    return;
-
-  // Start the data source, i.e., start capturing data from the current source.
-  // We need to set the AGC control before starting the stream.
-  source_->SetAutomaticGainControl(true);
-  source_->Start();
-  running_ = true;
-}
-
-void WebRtcAudioCapturer::Stop() {
-  DCHECK(thread_checker_.CalledOnValidThread());
-  DVLOG(1) << "WebRtcAudioCapturer::Stop()";
-  scoped_refptr<media::AudioCapturerSource> source;
-  TrackList::ItemList tracks;
+  // Note: The source is stopped while |volume_lock_| is held so that
+  // SetVolume(), which reads |source_| under the same lock, cannot use a
+  // stopped source.
   {
-    base::AutoLock auto_lock(lock_);
-    if (!running_)
-      return;
-
-    source = source_;
-    tracks = tracks_.Items();
-    tracks_.Clear();
-    running_ = false;
+    base::AutoLock auto_lock(volume_lock_);
+    source_->Stop();
+    source_ = nullptr;
   }
-  // Remove the capturer object from the WebRtcAudioDeviceImpl.
-  if (audio_device_)
-    audio_device_->RemoveAudioCapturer(this);
-
-  for (TrackList::ItemList::const_iterator it = tracks.begin();
-       it != tracks.end();
-       ++it) {
-    (*it)->Stop();
-  }
-
-  if (source.get())
-    source->Stop();
-
   // Stop the audio processor to avoid feeding render data into the processor.
   audio_processor_->Stop();
+
+  VLOG(1) << "Stopped WebRTC audio pipeline for consumption by render frame "
+          << consumer_render_frame_id_ << '.';
 }
-void WebRtcAudioCapturer::SetVolume(int volume) {
-  DVLOG(1) << "WebRtcAudioCapturer::SetVolume()";
+void ProcessedLocalAudioSource::SetVolume(int volume) {
+  DVLOG(1) << "ProcessedLocalAudioSource::SetVolume()";
   DCHECK_LE(volume, MaxVolume());
   double normalized_volume = static_cast<double>(volume) / MaxVolume();
-  base::AutoLock auto_lock(lock_);
-  if (source_.get())
+  base::AutoLock auto_lock(volume_lock_);
+  if (source_)
     source_->SetVolume(normalized_volume);
 }
-int WebRtcAudioCapturer::Volume() const {
-  base::AutoLock auto_lock(lock_);
+int ProcessedLocalAudioSource::Volume() const {
+  base::AutoLock auto_lock(volume_lock_);
   return volume_;
 }
-int WebRtcAudioCapturer::MaxVolume() const {
+int ProcessedLocalAudioSource::MaxVolume() const {
   return WebRtcAudioDeviceImpl::kMaxVolumeLevel;
 }
-media::AudioParameters WebRtcAudioCapturer::GetOutputFormat() const {
-  DCHECK(thread_checker_.CalledOnValidThread());
-  return audio_processor_->OutputFormat();
-}
-
-void WebRtcAudioCapturer::Capture(const media::AudioBus* audio_source,
-                                  int audio_delay_milliseconds,
-                                  double volume,
-                                  bool key_pressed) {
-// This callback is driven by AudioInputDevice::AudioThreadCallback if
-// |source_| is AudioInputDevice, otherwise it is driven by client's
-// CaptureCallback.
+void ProcessedLocalAudioSource::Capture(const media::AudioBus* audio_bus,
+                                        int audio_delay_milliseconds,
+                                        double volume,
+                                        bool key_pressed) {
 #if defined(OS_WIN) || defined(OS_MACOSX)
   DCHECK_LE(volume, 1.0);
 #elif (defined(OS_LINUX) && !defined(OS_CHROMEOS)) || defined(OS_OPENBSD)
@@ -449,22 +297,15 @@ void WebRtcAudioCapturer::Capture(const media::AudioBus* audio_source,
   // audio/video sync. http://crbug.com/335335
   const base::TimeTicks reference_clock_snapshot = base::TimeTicks::Now();
-  TrackList::ItemList tracks;
-  TrackList::ItemList tracks_to_notify_format;
-  int current_volume = 0;
+  // Map internal volume range of [0.0, 1.0] into [0, 255] used by AGC.
+  // The volume can be higher than 255 on Linux, and it will be cropped to
+  // 255 since AGC does not allow values out of range.
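+  // For example, a normalized |volume| of 0.5 maps to
+  // static_cast<int>((0.5 * 255) + 0.5) == 128.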
+  int current_volume = static_cast<int>((volume * MaxVolume()) + 0.5);
   {
-    base::AutoLock auto_lock(lock_);
-    if (!running_)
-      return;
-
-    // Map internal volume range of [0.0, 1.0] into [0, 255] used by AGC.
-    // The volume can be higher than 255 on Linux, and it will be cropped to
-    // 255 since AGC does not allow values out of range.
-    volume_ = static_cast<int>((volume * MaxVolume()) + 0.5);
-    current_volume = volume_ > MaxVolume() ? MaxVolume() : volume_;
-    tracks = tracks_.Items();
-    tracks_.RetrieveAndClearTags(&tracks_to_notify_format);
+    base::AutoLock auto_lock(volume_lock_);
+    volume_ = current_volume;
   }
+  current_volume = current_volume > MaxVolume() ? MaxVolume() : current_volume;
   // Sanity-check the input audio format in debug builds. Then, notify the
   // tracks if the format has changed.
@@ -473,25 +314,18 @@ void WebRtcAudioCapturer::Capture(const media::AudioBus* audio_source,
   // because the audio processor format changes only occur while audio capture
   // is stopped.
   DCHECK(audio_processor_->InputFormat().IsValid());
-  DCHECK_EQ(audio_source->channels(),
-            audio_processor_->InputFormat().channels());
-  DCHECK_EQ(audio_source->frames(),
+  DCHECK_EQ(audio_bus->channels(), audio_processor_->InputFormat().channels());
+  DCHECK_EQ(audio_bus->frames(),
             audio_processor_->InputFormat().frames_per_buffer());
-  if (!tracks_to_notify_format.empty()) {
-    const media::AudioParameters& output_params =
-        audio_processor_->OutputFormat();
-    for (const auto& track : tracks_to_notify_format)
-      track->OnSetFormat(output_params);
-  }
   // Figure out if the pre-processed data has any energy or not. This
   // information will be passed to the level calculator to force it to report
   // energy in case the post-processed data is zeroed by the audio processing.
-  const bool force_report_nonzero_energy = !audio_source->AreFramesZero();
+  const bool force_report_nonzero_energy = !audio_bus->AreFramesZero();
   // Push the data to the processor for processing.
   audio_processor_->PushCaptureData(
-      *audio_source,
+      *audio_bus,
       base::TimeDelta::FromMilliseconds(audio_delay_milliseconds));
   // Process and consume the data in the processor until there is not enough
@@ -506,10 +340,8 @@ void WebRtcAudioCapturer::Capture(const media::AudioBus* audio_source,
     level_calculator_.Calculate(*processed_data, force_report_nonzero_energy);
-    const base::TimeTicks processed_data_capture_time =
-        reference_clock_snapshot - processed_data_audio_delay;
-    for (const auto& track : tracks)
-      track->Capture(*processed_data, processed_data_capture_time);
+    MediaStreamAudioSource::DeliverDataToTracks(
+        *processed_data, reference_clock_snapshot - processed_data_audio_delay);
     if (new_volume) {
       SetVolume(new_volume);
@@ -520,47 +352,38 @@ void WebRtcAudioCapturer::Capture(const media::AudioBus* audio_source,
   }
 }
-void WebRtcAudioCapturer::OnCaptureError(const std::string& message) {
-  WebRtcLogMessage("WAC::OnCaptureError: " + message);
+void ProcessedLocalAudioSource::OnCaptureError(const std::string& message) {
+  WebRtcLogMessage("ProcessedLocalAudioSource::OnCaptureError: " + message);
 }
-media::AudioParameters WebRtcAudioCapturer::GetInputFormat() const {
-  base::AutoLock auto_lock(lock_);
-  return audio_processor_->InputFormat();
+media::AudioParameters ProcessedLocalAudioSource::GetInputFormat() const {
+  return audio_processor_ ? audio_processor_->InputFormat()
+                          : media::AudioParameters();
 }
-int WebRtcAudioCapturer::GetBufferSize(int sample_rate) const {
+int ProcessedLocalAudioSource::GetBufferSize(int sample_rate) const {
   DCHECK(thread_checker_.CalledOnValidThread());
 #if defined(OS_ANDROID)
-  // TODO(henrika): Tune and adjust buffer size on Android.
+  // TODO(henrika): Re-evaluate whether to use same logic as other platforms.
   return (2 * sample_rate / 100);
 #endif
-  // PeerConnection is running at a buffer size of 10ms data. A multiple of
-  // 10ms as the buffer size can give the best performance to PeerConnection.
-  int peer_connection_buffer_size = sample_rate / 100;
-
-  // Use the native hardware buffer size in non peer connection mode when the
-  // platform is using a native buffer size smaller than the PeerConnection
-  // buffer size and audio processing is off.
-  int hardware_buffer_size = device_info_.device.input.frames_per_buffer;
-  if (!peer_connection_mode_ && hardware_buffer_size &&
-      hardware_buffer_size <= peer_connection_buffer_size &&
-      !audio_processor_->has_audio_processing()) {
-    DVLOG(1) << "WebRtcAudioCapturer is using hardware buffer size "
-             << hardware_buffer_size;
+  // If audio processing is turned on, require 10ms buffers.
+  if (audio_processor_->has_audio_processing())
+    return (sample_rate / 100);
+
+  // If audio processing is off and the native hardware buffer size was
+  // provided, use it. It can be harmful, in terms of CPU/power consumption, to
+  // use smaller buffer sizes than the native size (http://crbug.com/362261).
+  if (int hardware_buffer_size = device_info().device.input.frames_per_buffer)
    return hardware_buffer_size;
-  }
+  // If the buffer size is missing from the StreamDeviceInfo, provide 10ms as a
+  // fall-back.
+  //
+  // TODO(miu): Identify where/why the buffer size might be missing, fix the
+  // code, and then require it here.
   return (sample_rate / 100);
 }
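+// Example: with audio processing enabled at a 48000 Hz sample rate,
+// GetBufferSize() returns 48000 / 100 == 480 frames, i.e. one 10 ms buffer.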
-void WebRtcAudioCapturer::SetCapturerSource(
-    const scoped_refptr<media::AudioCapturerSource>& source,
-    media::AudioParameters params) {
-  // Create a new audio stream as source which uses the new source.
-  SetCapturerSourceInternal(source, params.channel_layout(),
-                            params.sample_rate());
-}
-
 }  // namespace content