| Index: content/renderer/media/webrtc/processed_local_audio_source.cc
|
| diff --git a/content/renderer/media/webrtc_audio_capturer.cc b/content/renderer/media/webrtc/processed_local_audio_source.cc
|
| similarity index 23%
|
| rename from content/renderer/media/webrtc_audio_capturer.cc
|
| rename to content/renderer/media/webrtc/processed_local_audio_source.cc
|
| index 113233ebefd60e3db1ac9a3c08c5e04fc6d18349..7d4ab030965ac529b4a0425461b90b50fd526f05 100644
|
| --- a/content/renderer/media/webrtc_audio_capturer.cc
|
| +++ b/content/renderer/media/webrtc/processed_local_audio_source.cc
|
| @@ -1,35 +1,79 @@
|
| -// Copyright (c) 2012 The Chromium Authors. All rights reserved.
|
| +// Copyright 2016 The Chromium Authors. All rights reserved.
|
| // Use of this source code is governed by a BSD-style license that can be
|
| // found in the LICENSE file.
|
|
|
| -#include "content/renderer/media/webrtc_audio_capturer.h"
|
| +#include "content/renderer/media/webrtc/processed_local_audio_source.h"
|
|
|
| -#include "base/bind.h"
|
| #include "base/logging.h"
|
| -#include "base/macros.h"
|
| #include "base/metrics/histogram.h"
|
| -#include "base/strings/string_util.h"
|
| #include "base/strings/stringprintf.h"
|
| -#include "build/build_config.h"
|
| -#include "content/child/child_process.h"
|
| #include "content/renderer/media/audio_device_factory.h"
|
| #include "content/renderer/media/media_stream_audio_processor.h"
|
| #include "content/renderer/media/media_stream_audio_processor_options.h"
|
| -#include "content/renderer/media/media_stream_audio_source.h"
|
| #include "content/renderer/media/media_stream_constraints_util.h"
|
| +#include "content/renderer/media/rtc_media_constraints.h"
|
| +#include "content/renderer/media/webrtc/processed_local_audio_track.h"
|
| +#include "content/renderer/media/webrtc/webrtc_local_audio_track_adapter.h"
|
| #include "content/renderer/media/webrtc_audio_device_impl.h"
|
| -#include "content/renderer/media/webrtc_local_audio_track.h"
|
| #include "content/renderer/media/webrtc_logging.h"
|
| +#include "content/renderer/render_frame_impl.h"
|
| +#include "media/audio/audio_input_device.h"
|
| #include "media/audio/sample_rates.h"
|
| +#include "media/base/channel_layout.h"
|
| +#include "third_party/webrtc/api/mediaconstraintsinterface.h"
|
|
|
| namespace content {
|
|
|
| namespace {
|
|
|
| -// Audio buffer sizes are specified in milliseconds.
|
| -const char kAudioLatency[] = "latencyMs";
|
| -const int kMinAudioLatencyMs = 0;
|
| -const int kMaxAudioLatencyMs = 10000;
|
| +// Used as an identifier for ProcessedLocalAudioSource::From().
|
| +void* const kClassIdentifier = const_cast<void**>(&kClassIdentifier);
|
| +
|
| +// Map of corresponding media constraints and platform effects.
|
| +struct {
|
| + const char* constraint;
|
| + const media::AudioParameters::PlatformEffectsMask effect;
|
| +} const kConstraintEffectMap[] = {
|
| + { webrtc::MediaConstraintsInterface::kGoogEchoCancellation,
|
| + media::AudioParameters::ECHO_CANCELLER },
|
| +};
|
| +
|
| +// If any platform effects are available, check them against the constraints.
|
| +// Disable effects to match false constraints, but if a constraint is true, set
|
| +// the constraint to false to later disable the software effect.
|
| +//
|
| +// This function may modify both |constraints| and |effects|.
|
| +void HarmonizeConstraintsAndEffects(RTCMediaConstraints* constraints,
|
| + int* effects) {
|
| + if (*effects != media::AudioParameters::NO_EFFECTS) {
|
| + for (size_t i = 0; i < arraysize(kConstraintEffectMap); ++i) {
|
| + bool value;
|
| + size_t is_mandatory = 0;
|
| + if (!webrtc::FindConstraint(constraints,
|
| + kConstraintEffectMap[i].constraint,
|
| + &value,
|
| + &is_mandatory) || !value) {
|
| + // If the constraint is false, or does not exist, disable the platform
|
| + // effect.
|
| + *effects &= ~kConstraintEffectMap[i].effect;
|
| + DVLOG(1) << "Disabling platform effect: "
|
| + << kConstraintEffectMap[i].effect;
|
| + } else if (*effects & kConstraintEffectMap[i].effect) {
|
| + // If the constraint is true, leave the platform effect enabled, and
|
| + // set the constraint to false to later disable the software effect.
|
| + if (is_mandatory) {
|
| + constraints->AddMandatory(kConstraintEffectMap[i].constraint,
|
| + webrtc::MediaConstraintsInterface::kValueFalse, true);
|
| + } else {
|
| + constraints->AddOptional(kConstraintEffectMap[i].constraint,
|
| + webrtc::MediaConstraintsInterface::kValueFalse, true);
|
| + }
|
| + DVLOG(1) << "Disabling constraint: "
|
| + << kConstraintEffectMap[i].constraint;
|
| + }
|
| + }
|
| + }
|
| +}
|
|
|
| // Method to check if any of the data in |audio_source| has energy.
|
| bool HasDataEnergy(const media::AudioBus& audio_source) {
|
| @@ -47,129 +91,157 @@ bool HasDataEnergy(const media::AudioBus& audio_source) {
|
|
|
| } // namespace
|
|
|
| -// Reference counted container of WebRtcLocalAudioTrack delegate.
|
| -// TODO(xians): Switch to MediaStreamAudioSinkOwner.
|
| -class WebRtcAudioCapturer::TrackOwner
|
| - : public base::RefCountedThreadSafe<WebRtcAudioCapturer::TrackOwner> {
|
| - public:
|
| - explicit TrackOwner(WebRtcLocalAudioTrack* track)
|
| - : delegate_(track) {}
|
| -
|
| - void Capture(const media::AudioBus& audio_bus,
|
| - base::TimeTicks estimated_capture_time,
|
| - bool force_report_nonzero_energy) {
|
| - base::AutoLock lock(lock_);
|
| - if (delegate_) {
|
| - delegate_->Capture(audio_bus,
|
| - estimated_capture_time,
|
| - force_report_nonzero_energy);
|
| - }
|
| - }
|
| +ProcessedLocalAudioSource::ProcessedLocalAudioSource(
|
| + int consumer_render_frame_id,
|
| + const StreamDeviceInfo& device_info,
|
| + PeerConnectionDependencyFactory* factory)
|
| + : MediaStreamAudioSource(true /* is_local_source */),
|
| + consumer_render_frame_id_(consumer_render_frame_id),
|
| + pc_factory_(factory),
|
| + exposed_volume_(0),
|
| + allow_invalid_render_frame_id_for_testing_(false) {
|
| + DCHECK(pc_factory_);
|
| + DVLOG(1) << "ProcessedLocalAudioSource::ProcessedLocalAudioSource()";
|
| + MediaStreamSource::SetDeviceInfo(device_info);
|
| +}
|
|
|
| - void OnSetFormat(const media::AudioParameters& params) {
|
| - base::AutoLock lock(lock_);
|
| - if (delegate_)
|
| - delegate_->OnSetFormat(params);
|
| - }
|
| +ProcessedLocalAudioSource::~ProcessedLocalAudioSource() {
|
| + DVLOG(1) << "ProcessedLocalAudioSource::~ProcessedLocalAudioSource()";
|
| + // Superclass will call StopSource() just in case.
|
| +}
|
|
|
| - void SetAudioProcessor(
|
| - const scoped_refptr<MediaStreamAudioProcessor>& processor) {
|
| - base::AutoLock lock(lock_);
|
| - if (delegate_)
|
| - delegate_->SetAudioProcessor(processor);
|
| - }
|
| +// static
|
| +ProcessedLocalAudioSource* ProcessedLocalAudioSource::From(
|
| + MediaStreamAudioSource* source) {
|
| + if (source && source->GetClassIdentifier() == kClassIdentifier)
|
| + return static_cast<ProcessedLocalAudioSource*>(source);
|
| + return nullptr;
|
| +}
|
|
|
| - void Reset() {
|
| - base::AutoLock lock(lock_);
|
| - delegate_ = NULL;
|
| - }
|
| +void ProcessedLocalAudioSource::SetSourceConstraints(
|
| + const blink::WebMediaConstraints& constraints) {
|
| + DCHECK(!constraints.isNull());
|
| + constraints_ = constraints;
|
| +}
|
|
|
| - void Stop() {
|
| - base::AutoLock lock(lock_);
|
| - DCHECK(delegate_);
|
| +void* ProcessedLocalAudioSource::GetClassIdentifier() const {
|
| + return kClassIdentifier;
|
| +}
|
|
|
| - // This can be reentrant so reset |delegate_| before calling out.
|
| - WebRtcLocalAudioTrack* temp = delegate_;
|
| - delegate_ = NULL;
|
| - temp->Stop();
|
| +void ProcessedLocalAudioSource::DoStopSource() {
|
| + DCHECK(thread_checker_.CalledOnValidThread());
|
| + if (is_stopped_)
|
| + return;
|
| +
|
| + // Setting |is_stopped_| while holding the |volume_lock_| because the
|
| + // SetVolume() method needs to know whether |input_device_| is valid.
|
| + {
|
| + base::AutoLock auto_lock(volume_lock_);
|
| + is_stopped_ = true;
|
| }
|
|
|
| - // Wrapper which allows to use std::find_if() when adding and removing
|
| - // sinks to/from the list.
|
| - struct TrackWrapper {
|
| - explicit TrackWrapper(WebRtcLocalAudioTrack* track) : track_(track) {}
|
| - bool operator()(
|
| - const scoped_refptr<WebRtcAudioCapturer::TrackOwner>& owner) const {
|
| - return owner->IsEqual(track_);
|
| + if (input_device_) {
|
| + if (WebRtcAudioDeviceImpl* rtc_audio_device =
|
| + pc_factory_->GetWebRtcAudioDevice()) {
|
| + rtc_audio_device->RemoveAudioCapturer(this);
|
| }
|
| - WebRtcLocalAudioTrack* track_;
|
| - };
|
|
|
| - protected:
|
| - virtual ~TrackOwner() {}
|
| + input_device_->Stop();
|
|
|
| - private:
|
| - friend class base::RefCountedThreadSafe<WebRtcAudioCapturer::TrackOwner>;
|
| + // Stop the audio processor to avoid feeding render data into the processor.
|
| + audio_processor_->Stop();
|
|
|
| - bool IsEqual(const WebRtcLocalAudioTrack* other) const {
|
| - base::AutoLock lock(lock_);
|
| - return (other == delegate_);
|
| + VLOG(1) << "Stopped WebRTC audio pipeline for consumption by render frame "
|
| + << consumer_render_frame_id_ << '.';
|
| }
|
| +}
|
|
|
| - // Do NOT reference count the |delegate_| to avoid cyclic reference counting.
|
| - WebRtcLocalAudioTrack* delegate_;
|
| - mutable base::Lock lock_;
|
| -
|
| - DISALLOW_COPY_AND_ASSIGN(TrackOwner);
|
| -};
|
| +scoped_ptr<MediaStreamAudioTrack>
|
| +ProcessedLocalAudioSource::CreateMediaStreamAudioTrack(
|
| + const std::string& id) {
|
| + DCHECK(thread_checker_.CalledOnValidThread());
|
|
|
| -// static
|
| -scoped_refptr<WebRtcAudioCapturer> WebRtcAudioCapturer::CreateCapturer(
|
| - int render_frame_id,
|
| - const StreamDeviceInfo& device_info,
|
| - const blink::WebMediaConstraints& constraints,
|
| - WebRtcAudioDeviceImpl* audio_device,
|
| - MediaStreamAudioSource* audio_source) {
|
| - scoped_refptr<WebRtcAudioCapturer> capturer = new WebRtcAudioCapturer(
|
| - render_frame_id, device_info, constraints, audio_device, audio_source);
|
| - if (capturer->Initialize())
|
| - return capturer;
|
| -
|
| - return NULL;
|
| + ProcessedLocalAudioTrack* const audio_track =
|
| + new ProcessedLocalAudioTrack(WebRtcLocalAudioTrackAdapter::Create(
|
| + id, rtc_source_.get(), pc_factory_->GetWebRtcSignalingThread()));
|
| + audio_track->adapter()->SetAudioProcessor(audio_processor_);
|
| + audio_track->adapter()->SetReportedLevel(level_calculator_.reported_level());
|
| + return scoped_ptr<MediaStreamAudioTrack>(audio_track);
|
| }
|
|
|
| -bool WebRtcAudioCapturer::Initialize() {
|
| +bool ProcessedLocalAudioSource::EnsureSourceIsStarted() {
|
| DCHECK(thread_checker_.CalledOnValidThread());
|
| - DVLOG(1) << "WebRtcAudioCapturer::Initialize()";
|
| - WebRtcLogMessage(base::StringPrintf(
|
| - "WAC::Initialize. render_frame_id=%d"
|
| - ", channel_layout=%d, sample_rate=%d, buffer_size=%d"
|
| - ", session_id=%d, paired_output_sample_rate=%d"
|
| - ", paired_output_frames_per_buffer=%d, effects=%d. ",
|
| - render_frame_id_, device_info_.device.input.channel_layout,
|
| - device_info_.device.input.sample_rate,
|
| - device_info_.device.input.frames_per_buffer, device_info_.session_id,
|
| - device_info_.device.matched_output.sample_rate,
|
| - device_info_.device.matched_output.frames_per_buffer,
|
| - device_info_.device.input.effects));
|
| -
|
| - if (render_frame_id_ == -1) {
|
| - // Return true here to allow injecting a new source via
|
| - // SetCapturerSourceForTesting() at a later state.
|
| +
|
| + if (is_stopped_)
|
| + return false;
|
| + if (input_device_)
|
| return true;
|
| +
|
| + // Sanity-check that the consuming RenderFrame still exists. This is
|
| + // required to initialize the audio source.
|
| + if (!allow_invalid_render_frame_id_for_testing_ &&
|
| + !RenderFrameImpl::FromRoutingID(consumer_render_frame_id_)) {
|
| + WebRtcLogMessage("ProcessedLocalAudioSource::EnsureSourceIsStarted() fails "
|
| + "because the render frame does not exist.");
|
| + StopSource();
|
| + return false;
|
| }
|
|
|
| - MediaAudioConstraints audio_constraints(constraints_,
|
| - device_info_.device.input.effects);
|
| - if (!audio_constraints.IsValid())
|
| + // Using |constraints_| as a basis, apply additional default constraints for
|
| + // audio processing and take the |effects| from StreamDeviceInfo into account.
|
| + //
|
| + // TODO(miu): Consolidation of logic needed here: There is both a
|
| + // RTCMediaConstraints and MediaAudioConstraints class, plus the constraints
|
| + // are being modified both within and outside this module. (This problem was
|
| + // exposed after a major refactoring.)
|
| + RTCMediaConstraints rtc_constraints(constraints_);
|
| + MediaAudioConstraints::ApplyFixedAudioConstraints(&rtc_constraints);
|
| + StreamDeviceInfo modified_device_info = device_info();
|
| + HarmonizeConstraintsAndEffects(&rtc_constraints,
|
| + &modified_device_info.device.input.effects);
|
| + MediaStreamSource::SetDeviceInfo(modified_device_info);
|
| + MediaAudioConstraints audio_constraints(
|
| + constraints_, modified_device_info.device.input.effects);
|
| + if (!audio_constraints.IsValid()) {
|
| + WebRtcLogMessage("ProcessedLocalAudioSource::EnsureSourceIsStarted() fails "
|
| + "because MediaAudioConstraints are not valid.");
|
| + StopSource();
|
| return false;
|
| + }
|
|
|
| - media::ChannelLayout channel_layout = static_cast<media::ChannelLayout>(
|
| - device_info_.device.input.channel_layout);
|
| + const MediaStreamDevice::AudioDeviceParameters& source_params =
|
| + modified_device_info.device.input;
|
| + const MediaStreamDevice::AudioDeviceParameters& matched_params =
|
| + modified_device_info.device.matched_output;
|
| + WebRtcLogMessage(base::StringPrintf(
|
| + "ProcessedLocalAudioSource::EnsureSourceIsStarted. PRELIMINARY "
|
| + "parameters: render_frame_id=%d"
|
| + ", channel_layout=%d, sample_rate=%d, buffer_size=%d"
|
| + ", session_id=%d, paired_output_sample_rate=%d"
|
| + ", paired_output_frames_per_buffer=%d, effects=%d.",
|
| + consumer_render_frame_id_, source_params.channel_layout,
|
| + source_params.sample_rate, source_params.frames_per_buffer,
|
| + modified_device_info.session_id, matched_params.sample_rate,
|
| + matched_params.frames_per_buffer, source_params.effects));
|
| +
|
| + // Create the MediaStreamAudioProcessor, bound to the WebRTC audio device
|
| + // module.
|
| + WebRtcAudioDeviceImpl* const rtc_audio_device =
|
| + pc_factory_->GetWebRtcAudioDevice();
|
| + if (!rtc_audio_device) {
|
| + WebRtcLogMessage("ProcessedLocalAudioSource::EnsureSourceIsStarted() fails "
|
| + "because there is no WebRtcAudioDeviceImpl instance.");
|
| + StopSource();
|
| + return false;
|
| + }
|
| + audio_processor_ = new rtc::RefCountedObject<MediaStreamAudioProcessor>(
|
| + constraints_, source_params, rtc_audio_device);
|
|
|
| // If KEYBOARD_MIC effect is set, change the layout to the corresponding
|
| // layout that includes the keyboard mic.
|
| - if ((device_info_.device.input.effects &
|
| + media::ChannelLayout channel_layout = static_cast<media::ChannelLayout>(
|
| + modified_device_info.device.input.channel_layout);
|
| + if ((modified_device_info.device.input.effects &
|
| media::AudioParameters::KEYBOARD_MIC) &&
|
| audio_constraints.GetProperty(
|
| MediaAudioConstraints::kGoogExperimentalNoiseSuppression)) {
|
| @@ -182,299 +254,117 @@ bool WebRtcAudioCapturer::Initialize() {
|
| << channel_layout;
|
| }
|
| }
|
| -
|
| DVLOG(1) << "Audio input hardware channel layout: " << channel_layout;
|
| UMA_HISTOGRAM_ENUMERATION("WebRTC.AudioInputChannelLayout",
|
| channel_layout, media::CHANNEL_LAYOUT_MAX + 1);
|
|
|
| - // Verify that the reported input channel configuration is supported.
|
| + // Verify that the input channel configuration is supported.
|
| if (channel_layout != media::CHANNEL_LAYOUT_MONO &&
|
| channel_layout != media::CHANNEL_LAYOUT_STEREO &&
|
| channel_layout != media::CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC) {
|
| - DLOG(ERROR) << channel_layout
|
| - << " is not a supported input channel configuration.";
|
| + WebRtcLogMessage(base::StringPrintf(
|
| + "ProcessedLocalAudioSource::EnsureSourceIsStarted() fails "
|
| + "because the input channel layout (%d) is not supported.",
|
| + static_cast<int>(channel_layout)));
|
| + StopSource();
|
| return false;
|
| }
|
|
|
| DVLOG(1) << "Audio input hardware sample rate: "
|
| - << device_info_.device.input.sample_rate;
|
| + << modified_device_info.device.input.sample_rate;
|
| media::AudioSampleRate asr;
|
| - if (media::ToAudioSampleRate(device_info_.device.input.sample_rate, &asr)) {
|
| + if (media::ToAudioSampleRate(modified_device_info.device.input.sample_rate,
|
| + &asr)) {
|
| UMA_HISTOGRAM_ENUMERATION(
|
| "WebRTC.AudioInputSampleRate", asr, media::kAudioSampleRateMax + 1);
|
| } else {
|
| UMA_HISTOGRAM_COUNTS("WebRTC.AudioInputSampleRateUnexpected",
|
| - device_info_.device.input.sample_rate);
|
| - }
|
| -
|
| - // Initialize the buffer size to zero, which means it wasn't specified.
|
| - // If it is out of range, we return it to zero.
|
| - int buffer_size_ms = 0;
|
| - int buffer_size_samples = 0;
|
| - GetConstraintValueAsInteger(constraints_, kAudioLatency, &buffer_size_ms);
|
| - if (buffer_size_ms < kMinAudioLatencyMs ||
|
| - buffer_size_ms > kMaxAudioLatencyMs) {
|
| - DVLOG(1) << "Ignoring out of range buffer size " << buffer_size_ms;
|
| - } else {
|
| - buffer_size_samples =
|
| - device_info_.device.input.sample_rate * buffer_size_ms / 1000;
|
| - }
|
| - DVLOG_IF(1, buffer_size_samples > 0)
|
| - << "Custom audio buffer size: " << buffer_size_samples << " samples";
|
| -
|
| - // Create and configure the default audio capturing source.
|
| - SetCapturerSourceInternal(
|
| - AudioDeviceFactory::NewInputDevice(render_frame_id_),
|
| - channel_layout,
|
| - device_info_.device.input.sample_rate,
|
| - buffer_size_samples);
|
| -
|
| - // Add the capturer to the WebRtcAudioDeviceImpl since it needs some hardware
|
| - // information from the capturer.
|
| - if (audio_device_)
|
| - audio_device_->AddAudioCapturer(this);
|
| -
|
| - return true;
|
| -}
|
| -
|
| -WebRtcAudioCapturer::WebRtcAudioCapturer(
|
| - int render_frame_id,
|
| - const StreamDeviceInfo& device_info,
|
| - const blink::WebMediaConstraints& constraints,
|
| - WebRtcAudioDeviceImpl* audio_device,
|
| - MediaStreamAudioSource* audio_source)
|
| - : constraints_(constraints),
|
| - audio_processor_(new rtc::RefCountedObject<MediaStreamAudioProcessor>(
|
| - constraints,
|
| - device_info.device.input,
|
| - audio_device)),
|
| - running_(false),
|
| - render_frame_id_(render_frame_id),
|
| - device_info_(device_info),
|
| - volume_(0),
|
| - peer_connection_mode_(false),
|
| - audio_device_(audio_device),
|
| - audio_source_(audio_source) {
|
| - DVLOG(1) << "WebRtcAudioCapturer::WebRtcAudioCapturer()";
|
| -}
|
| -
|
| -WebRtcAudioCapturer::~WebRtcAudioCapturer() {
|
| - DCHECK(thread_checker_.CalledOnValidThread());
|
| - DCHECK(tracks_.IsEmpty());
|
| - DVLOG(1) << "WebRtcAudioCapturer::~WebRtcAudioCapturer()";
|
| - Stop();
|
| -}
|
| -
|
| -void WebRtcAudioCapturer::AddTrack(WebRtcLocalAudioTrack* track) {
|
| - DCHECK(track);
|
| - DVLOG(1) << "WebRtcAudioCapturer::AddTrack()";
|
| -
|
| - {
|
| - base::AutoLock auto_lock(lock_);
|
| - // Verify that |track| is not already added to the list.
|
| - DCHECK(!tracks_.Contains(TrackOwner::TrackWrapper(track)));
|
| -
|
| - // Add with a tag, so we remember to call OnSetFormat() on the new
|
| - // track.
|
| - scoped_refptr<TrackOwner> track_owner(new TrackOwner(track));
|
| - tracks_.AddAndTag(track_owner.get());
|
| - }
|
| -}
|
| -
|
| -void WebRtcAudioCapturer::RemoveTrack(WebRtcLocalAudioTrack* track) {
|
| - DCHECK(thread_checker_.CalledOnValidThread());
|
| - DVLOG(1) << "WebRtcAudioCapturer::RemoveTrack()";
|
| - bool stop_source = false;
|
| - {
|
| - base::AutoLock auto_lock(lock_);
|
| -
|
| - scoped_refptr<TrackOwner> removed_item =
|
| - tracks_.Remove(TrackOwner::TrackWrapper(track));
|
| -
|
| - // Clear the delegate to ensure that no more capture callbacks will
|
| - // be sent to this sink. Also avoids a possible crash which can happen
|
| - // if this method is called while capturing is active.
|
| - if (removed_item.get()) {
|
| - removed_item->Reset();
|
| - stop_source = tracks_.IsEmpty();
|
| - }
|
| - }
|
| - if (stop_source) {
|
| - // Since WebRtcAudioCapturer does not inherit MediaStreamAudioSource,
|
| - // and instead MediaStreamAudioSource is composed of a WebRtcAudioCapturer,
|
| - // we have to call StopSource on the MediaStreamSource. This will call
|
| - // MediaStreamAudioSource::DoStopSource which in turn call
|
| - // WebRtcAudioCapturerer::Stop();
|
| - audio_source_->StopSource();
|
| + modified_device_info.device.input.sample_rate);
|
| }
|
| -}
|
|
|
| -void WebRtcAudioCapturer::SetCapturerSourceInternal(
|
| - const scoped_refptr<media::AudioCapturerSource>& source,
|
| - media::ChannelLayout channel_layout,
|
| - int sample_rate,
|
| - int buffer_size) {
|
| - DCHECK(thread_checker_.CalledOnValidThread());
|
| - DVLOG(1) << "SetCapturerSource(channel_layout=" << channel_layout << ","
|
| - << "sample_rate=" << sample_rate << ")";
|
| - scoped_refptr<media::AudioCapturerSource> old_source;
|
| - {
|
| - base::AutoLock auto_lock(lock_);
|
| - if (source_.get() == source.get())
|
| - return;
|
| -
|
| - source_.swap(old_source);
|
| - source_ = source;
|
| -
|
| - // Reset the flag to allow starting the new source.
|
| - running_ = false;
|
| - }
|
| -
|
| - DVLOG(1) << "Switching to a new capture source.";
|
| - if (old_source.get())
|
| - old_source->Stop();
|
| -
|
| - // If the buffer size is zero, it has not been specified.
|
| - // We either default to 10ms, or use the hardware buffer size.
|
| - if (buffer_size == 0)
|
| - buffer_size = GetBufferSize(sample_rate);
|
| -
|
| - // Dispatch the new parameters both to the sink(s) and to the new source,
|
| - // also apply the new |constraints|.
|
| - // The idea is to get rid of any dependency of the microphone parameters
|
| - // which would normally be used by default.
|
| - // bits_per_sample is always 16 for now.
|
| - media::AudioParameters params(media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
|
| - channel_layout, sample_rate, 16, buffer_size);
|
| - params.set_effects(device_info_.device.input.effects);
|
| -
|
| - {
|
| - base::AutoLock auto_lock(lock_);
|
| - // Notify the |audio_processor_| of the new format.
|
| - audio_processor_->OnCaptureFormatChanged(params);
|
| -
|
| - // Notify all tracks about the new format.
|
| - tracks_.TagAll();
|
| - }
|
| -
|
| - if (source.get())
|
| - source->Initialize(params, this, session_id());
|
| -
|
| - Start();
|
| -}
|
| -
|
| -void WebRtcAudioCapturer::EnablePeerConnectionMode() {
|
| - DCHECK(thread_checker_.CalledOnValidThread());
|
| - DVLOG(1) << "EnablePeerConnectionMode";
|
| - // Do nothing if the peer connection mode has been enabled.
|
| - if (peer_connection_mode_)
|
| - return;
|
| -
|
| - peer_connection_mode_ = true;
|
| - int render_frame_id = -1;
|
| - media::AudioParameters input_params;
|
| - {
|
| - base::AutoLock auto_lock(lock_);
|
| - // Simply return if there is no existing source or the |render_frame_id_| is
|
| - // not valid.
|
| - if (!source_.get() || render_frame_id_ == -1)
|
| - return;
|
| -
|
| - render_frame_id = render_frame_id_;
|
| - input_params = audio_processor_->InputFormat();
|
| - }
|
| + // The buffer size is 20 ms on Android, and 10 ms everywhere else.
|
| +#if defined(OS_ANDROID)
|
| + // TODO(henrika): Tune and adjust buffer size on Android.
|
| + const int buffer_size_samples =
|
| + modified_device_info.device.input.sample_rate / 50;
|
| +#else
|
| + const int buffer_size_samples =
|
| + modified_device_info.device.input.sample_rate / 100;
|
| +#endif
|
|
|
| - // Do nothing if the current buffer size is the WebRtc native buffer size.
|
| - if (GetBufferSize(input_params.sample_rate()) ==
|
| - input_params.frames_per_buffer()) {
|
| - return;
|
| + // Determine the audio format required of the AudioInputDevice. Then, pass
|
| + // that to the |audio_processor_| and set the output format of this
|
| + // ProcessedLocalAudioSource to the processor's output format.
|
| + media::AudioParameters params(
|
| + media::AudioParameters::AUDIO_PCM_LOW_LATENCY, channel_layout,
|
| + modified_device_info.device.input.sample_rate, 16, buffer_size_samples);
|
| + params.set_effects(modified_device_info.device.input.effects);
|
| + DCHECK(params.IsValid());
|
| + audio_processor_->OnCaptureFormatChanged(params);
|
| + MediaStreamAudioSource::SetFormat(audio_processor_->OutputFormat());
|
| +
|
| + // Start the source.
|
| + VLOG(1) << "Starting WebRTC audio source for consumption by render frame "
|
| + << consumer_render_frame_id_ << " with audio parameters={"
|
| + << GetAudioParameters().AsHumanReadableString() << '}';
|
| + scoped_refptr<media::AudioInputDevice> device =
|
| + AudioDeviceFactory::NewInputDevice(consumer_render_frame_id_);
|
| + device->Initialize(params, this, modified_device_info.session_id);
|
| + // We need to set the AGC control before starting the stream.
|
| + device->SetAutomaticGainControl(true);
|
| + device->Start();
|
| + input_device_ = device; // Thread-safe assignment.
|
| +
|
| + // Register this source with the WebRtcAudioDeviceImpl.
|
| + rtc_audio_device->AddAudioCapturer(this);
|
| +
|
| + // Creates a LocalAudioSource object which holds audio options.
|
| + // TODO(xians): The option should apply to the track instead of the source.
|
| + // TODO(perkj): Move audio constraints parsing to Chrome.
|
| + // Currently there are a few constraints that are parsed by libjingle and
|
| + // the state is set to ended if parsing fails.
|
| + rtc_source_ = pc_factory_->CreateLocalAudioSource(&rtc_constraints);
|
| + if (rtc_source_->state() != webrtc::MediaSourceInterface::kLive) {
|
| + WebRtcLogMessage("ProcessedLocalAudioSource::EnsureSourceIsStarted() fails "
|
| + "because the rtc LocalAudioSource is not live.");
|
| + StopSource();
|
| + return false;
|
| }
|
|
|
| - // Create a new audio stream as source which will open the hardware using
|
| - // WebRtc native buffer size.
|
| - SetCapturerSourceInternal(AudioDeviceFactory::NewInputDevice(render_frame_id),
|
| - input_params.channel_layout(),
|
| - input_params.sample_rate(),
|
| - 0);
|
| + return true;
|
| }
|
|
|
| -void WebRtcAudioCapturer::Start() {
|
| - DCHECK(thread_checker_.CalledOnValidThread());
|
| - DVLOG(1) << "WebRtcAudioCapturer::Start()";
|
| - base::AutoLock auto_lock(lock_);
|
| - if (running_ || !source_.get())
|
| - return;
|
| -
|
| - // Start the data source, i.e., start capturing data from the current source.
|
| - // We need to set the AGC control before starting the stream.
|
| - source_->SetAutomaticGainControl(true);
|
| - source_->Start();
|
| - running_ = true;
|
| -}
|
| +void ProcessedLocalAudioSource::SetVolume(int volume) {
|
| + DVLOG(1) << "ProcessedLocalAudioSource::SetVolume()";
|
| + DCHECK_LE(volume, MaxVolume());
|
|
|
| -void WebRtcAudioCapturer::Stop() {
|
| - DCHECK(thread_checker_.CalledOnValidThread());
|
| - DVLOG(1) << "WebRtcAudioCapturer::Stop()";
|
| - scoped_refptr<media::AudioCapturerSource> source;
|
| - TrackList::ItemList tracks;
|
| {
|
| - base::AutoLock auto_lock(lock_);
|
| - if (!running_)
|
| + base::AutoLock auto_lock(volume_lock_);
|
| + if (is_stopped_)
|
| return;
|
| -
|
| - source = source_;
|
| - tracks = tracks_.Items();
|
| - tracks_.Clear();
|
| - running_ = false;
|
| }
|
|
|
| - // Remove the capturer object from the WebRtcAudioDeviceImpl.
|
| - if (audio_device_)
|
| - audio_device_->RemoveAudioCapturer(this);
|
| -
|
| - for (TrackList::ItemList::const_iterator it = tracks.begin();
|
| - it != tracks.end();
|
| - ++it) {
|
| - (*it)->Stop();
|
| + // Assumption: Once |input_device_| is set, it will never change. Thus,
|
| + // there's no need to hold any locks for the following:
|
| + if (input_device_.get()) {
|
| + double normalized_volume = static_cast<double>(volume) / MaxVolume();
|
| + input_device_->SetVolume(normalized_volume);
|
| }
|
| -
|
| - if (source.get())
|
| - source->Stop();
|
| -
|
| - // Stop the audio processor to avoid feeding render data into the processor.
|
| - audio_processor_->Stop();
|
| }
|
|
|
| -void WebRtcAudioCapturer::SetVolume(int volume) {
|
| - DVLOG(1) << "WebRtcAudioCapturer::SetVolume()";
|
| - DCHECK_LE(volume, MaxVolume());
|
| - double normalized_volume = static_cast<double>(volume) / MaxVolume();
|
| - base::AutoLock auto_lock(lock_);
|
| - if (source_.get())
|
| - source_->SetVolume(normalized_volume);
|
| -}
|
| -
|
| -int WebRtcAudioCapturer::Volume() const {
|
| - base::AutoLock auto_lock(lock_);
|
| - return volume_;
|
| +int ProcessedLocalAudioSource::Volume() const {
|
| + base::AutoLock auto_lock(volume_lock_);
|
| + return exposed_volume_;
|
| }
|
|
|
| -int WebRtcAudioCapturer::MaxVolume() const {
|
| +int ProcessedLocalAudioSource::MaxVolume() const {
|
| return WebRtcAudioDeviceImpl::kMaxVolumeLevel;
|
| }
|
|
|
| -media::AudioParameters WebRtcAudioCapturer::GetOutputFormat() const {
|
| - DCHECK(thread_checker_.CalledOnValidThread());
|
| - return audio_processor_->OutputFormat();
|
| -}
|
| -
|
| -void WebRtcAudioCapturer::Capture(const media::AudioBus* audio_source,
|
| - int audio_delay_milliseconds,
|
| - double volume,
|
| - bool key_pressed) {
|
| -// This callback is driven by AudioInputDevice::AudioThreadCallback if
|
| -// |source_| is AudioInputDevice, otherwise it is driven by client's
|
| -// CaptureCallback.
|
| +void ProcessedLocalAudioSource::Capture(const media::AudioBus* audio_bus,
|
| + int audio_delay_milliseconds,
|
| + double volume,
|
| + bool key_pressed) {
|
| #if defined(OS_WIN) || defined(OS_MACOSX)
|
| DCHECK_LE(volume, 1.0);
|
| #elif (defined(OS_LINUX) && !defined(OS_CHROMEOS)) || defined(OS_OPENBSD)
|
| @@ -491,47 +381,27 @@ void WebRtcAudioCapturer::Capture(const media::AudioBus* audio_source,
|
| // audio/video sync. http://crbug.com/335335
|
| const base::TimeTicks reference_clock_snapshot = base::TimeTicks::Now();
|
|
|
| - TrackList::ItemList tracks;
|
| - TrackList::ItemList tracks_to_notify_format;
|
| - int current_volume = 0;
|
| + // Map internal volume range of [0.0, 1.0] into [0, 255] used by AGC.
|
| + int current_volume = static_cast<int>((volume * MaxVolume()) + 0.5);
|
| {
|
| - base::AutoLock auto_lock(lock_);
|
| - if (!running_)
|
| - return;
|
| -
|
| - // Map internal volume range of [0.0, 1.0] into [0, 255] used by AGC.
|
| - // The volume can be higher than 255 on Linux, and it will be cropped to
|
| - // 255 since AGC does not allow values out of range.
|
| - volume_ = static_cast<int>((volume * MaxVolume()) + 0.5);
|
| - current_volume = volume_ > MaxVolume() ? MaxVolume() : volume_;
|
| - tracks = tracks_.Items();
|
| - tracks_.RetrieveAndClearTags(&tracks_to_notify_format);
|
| + base::AutoLock auto_lock(volume_lock_);
|
| + exposed_volume_ = current_volume;
|
| }
|
| + // The volume can be higher than 255 on Linux, and it will be cropped to
|
| + // 255 since AGC does not allow values out of range.
|
| + current_volume = current_volume > MaxVolume() ? MaxVolume() : current_volume;
|
|
|
| DCHECK(audio_processor_->InputFormat().IsValid());
|
| - DCHECK_EQ(audio_source->channels(),
|
| - audio_processor_->InputFormat().channels());
|
| - DCHECK_EQ(audio_source->frames(),
|
| + DCHECK_EQ(audio_bus->channels(), audio_processor_->InputFormat().channels());
|
| + DCHECK_EQ(audio_bus->frames(),
|
| audio_processor_->InputFormat().frames_per_buffer());
|
|
|
| - // Notify the tracks on when the format changes. This will do nothing if
|
| - // |tracks_to_notify_format| is empty.
|
| - const media::AudioParameters& output_params =
|
| - audio_processor_->OutputFormat();
|
| - for (const auto& track : tracks_to_notify_format) {
|
| - track->OnSetFormat(output_params);
|
| - track->SetAudioProcessor(audio_processor_);
|
| - }
|
| -
|
| - // Figure out if the pre-processed data has any energy or not, the
|
| - // information will be passed to the track to force the calculator
|
| - // to report energy in case the post-processed data is zeroed by the audio
|
| - // processing.
|
| - const bool force_report_nonzero_energy = HasDataEnergy(*audio_source);
|
| + // Figure out if the pre-processed data has any energy or not.
|
| + const bool force_report_nonzero_energy = HasDataEnergy(*audio_bus);
|
|
|
| // Push the data to the processor for processing.
|
| audio_processor_->PushCaptureData(
|
| - *audio_source,
|
| + *audio_bus,
|
| base::TimeDelta::FromMilliseconds(audio_delay_milliseconds));
|
|
|
| // Process and consume the data in the processor until there is not enough
|
| @@ -543,85 +413,30 @@ void WebRtcAudioCapturer::Capture(const media::AudioBus* audio_source,
|
| current_volume, key_pressed,
|
| &processed_data, &processed_data_audio_delay, &new_volume)) {
|
| DCHECK(processed_data);
|
| - const base::TimeTicks processed_data_capture_time =
|
| - reference_clock_snapshot - processed_data_audio_delay;
|
| - for (const auto& track : tracks) {
|
| - track->Capture(*processed_data,
|
| - processed_data_capture_time,
|
| - force_report_nonzero_energy);
|
| - }
|
|
|
| - if (new_volume) {
|
| - SetVolume(new_volume);
|
| + level_calculator_.Calculate(*processed_data, force_report_nonzero_energy);
|
| +
|
| + MediaStreamAudioSource::DeliverDataToTracks(
|
| + *processed_data, reference_clock_snapshot - processed_data_audio_delay);
|
|
|
| - // Update the |current_volume| to avoid passing the old volume to AGC.
|
| + // TODO(xians): This could result in an IPC call being made for each audio
|
| + // chunk (!). Consider adding throttling logic here.
|
| + if (new_volume != current_volume) {
|
| + SetVolume(new_volume);
|
| current_volume = new_volume;
|
| }
|
| }
|
| }
|
|
|
| -void WebRtcAudioCapturer::OnCaptureError(const std::string& message) {
|
| - WebRtcLogMessage("WAC::OnCaptureError: " + message);
|
| -}
|
| -
|
| -media::AudioParameters WebRtcAudioCapturer::source_audio_parameters() const {
|
| - base::AutoLock auto_lock(lock_);
|
| - return audio_processor_.get() ? audio_processor_->InputFormat()
|
| - : media::AudioParameters();
|
| -}
|
| -
|
| -bool WebRtcAudioCapturer::GetPairedOutputParameters(
|
| - int* session_id,
|
| - int* output_sample_rate,
|
| - int* output_frames_per_buffer) const {
|
| - // Don't set output parameters unless all of them are valid.
|
| - if (device_info_.session_id <= 0 ||
|
| - !device_info_.device.matched_output.sample_rate ||
|
| - !device_info_.device.matched_output.frames_per_buffer)
|
| - return false;
|
| -
|
| - *session_id = device_info_.session_id;
|
| - *output_sample_rate = device_info_.device.matched_output.sample_rate;
|
| - *output_frames_per_buffer =
|
| - device_info_.device.matched_output.frames_per_buffer;
|
| -
|
| - return true;
|
| -}
|
| -
|
| -int WebRtcAudioCapturer::GetBufferSize(int sample_rate) const {
|
| - DCHECK(thread_checker_.CalledOnValidThread());
|
| -#if defined(OS_ANDROID)
|
| - // TODO(henrika): Tune and adjust buffer size on Android.
|
| - return (2 * sample_rate / 100);
|
| -#endif
|
| -
|
| - // PeerConnection is running at a buffer size of 10ms data. A multiple of
|
| - // 10ms as the buffer size can give the best performance to PeerConnection.
|
| - int peer_connection_buffer_size = sample_rate / 100;
|
| -
|
| - // Use the native hardware buffer size in non peer connection mode when the
|
| - // platform is using a native buffer size smaller than the PeerConnection
|
| - // buffer size and audio processing is off.
|
| - int hardware_buffer_size = device_info_.device.input.frames_per_buffer;
|
| - if (!peer_connection_mode_ && hardware_buffer_size &&
|
| - hardware_buffer_size <= peer_connection_buffer_size &&
|
| - !audio_processor_->has_audio_processing()) {
|
| - DVLOG(1) << "WebRtcAudioCapturer is using hardware buffer size "
|
| - << hardware_buffer_size;
|
| - return hardware_buffer_size;
|
| - }
|
| -
|
| - return (sample_rate / 100);
|
| +void ProcessedLocalAudioSource::OnCaptureError(const std::string& message) {
|
| + WebRtcLogMessage("ProcessedLocalAudioSource::OnCaptureError: " + message);
|
| }
|
|
|
| -void WebRtcAudioCapturer::SetCapturerSource(
|
| - const scoped_refptr<media::AudioCapturerSource>& source,
|
| - media::AudioParameters params) {
|
| - // Create a new audio stream as source which uses the new source.
|
| - SetCapturerSourceInternal(source,
|
| - params.channel_layout(),
|
| - params.sample_rate(),
|
| - 0);
|
| +media::AudioParameters ProcessedLocalAudioSource::GetInputAudioParameters()
|
| + const {
|
| + return audio_processor_.get()
|
| + ? audio_processor_->InputFormat()
|
| + : media::AudioParameters();
|
| }
|
|
|
| } // namespace content
|
|
|