| Index: content/renderer/media/media_stream_audio_source.cc
|
| diff --git a/content/renderer/media/media_stream_audio_source.cc b/content/renderer/media/media_stream_audio_source.cc
|
| index c643140e4dbdb6ef5d3d294477a3b7f3cb4cd785..db8fff6ca7914550c8bb20904124f5f13fa5bf01 100644
|
| --- a/content/renderer/media/media_stream_audio_source.cc
|
| +++ b/content/renderer/media/media_stream_audio_source.cc
|
| @@ -4,52 +4,435 @@
|
|
|
| #include "content/renderer/media/media_stream_audio_source.h"
|
|
|
| +#include <algorithm>
|
| +
|
| +#include "content/public/renderer/media_stream_audio_sink.h"
|
| +#include "content/renderer/media/audio_device_factory.h"
|
| +#include "content/renderer/media/media_stream_audio_processor.h"
|
| #include "content/renderer/render_frame_impl.h"
|
| +#include "media/audio/audio_input_device.h"
|
| +#include "media/audio/audio_parameters.h"
|
| +#include "third_party/WebKit/public/platform/WebMediaStreamSource.h"
|
|
|
| namespace content {
|
|
|
| +namespace {
|
| +
|
| +// If |frames_per_buffer| is invalid, compute 10 ms worth of frames based on the
|
| +// |sample_rate|.
|
| +int FramesPerBufferOrDefault(int frames_per_buffer, int sample_rate) {
|
| + if (frames_per_buffer > 0)
|
| + return frames_per_buffer;
|
| +
|
| + frames_per_buffer = sample_rate / 100;
|
| + DCHECK_GT(frames_per_buffer, 0);
|
| + return frames_per_buffer;
|
| +}
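|
| +
|
| +// Example (illustrative, not part of the change): at a 44100 Hz sample rate
|
| +// with an unspecified buffer size, FramesPerBufferOrDefault(0, 44100) returns
|
| +// 441 frames, i.e. 10 ms of audio.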
|
| +
|
| +// Returns a media::AudioParameters initialized from the parameters found in
|
| +// |device_info|.
|
| +media::AudioParameters GetParametersFromStreamDeviceInfo(
|
| + const StreamDeviceInfo& device_info) {
|
| + const MediaStreamDevice::AudioDeviceParameters& input_params =
|
| + device_info.device.input;
|
| + DCHECK_GT(input_params.sample_rate, 0);
|
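| + // Note (editorial): ChannelLayout values 0 and 1 are CHANNEL_LAYOUT_NONE
|
| + // and CHANNEL_LAYOUT_UNSUPPORTED, so this requires a usable layout.
|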
| + DCHECK_GT(input_params.channel_layout, 1);
|
| + return media::AudioParameters(
|
| + media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
|
| + static_cast<media::ChannelLayout>(input_params.channel_layout),
|
| + input_params.sample_rate,
|
| + 16, // Legacy parameter (data is always in 32-bit float format).
|
| + FramesPerBufferOrDefault(input_params.frames_per_buffer,
|
| + input_params.sample_rate));
|
| +}
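|
| +
|
| +// Example (illustrative): a stereo device reporting 48000 Hz and no explicit
|
| +// buffer size yields parameters equivalent to media::AudioParameters(
|
| +// AUDIO_PCM_LOW_LATENCY, CHANNEL_LAYOUT_STEREO, 48000, 16, 480).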
|
| +
|
| +} // namespace
|
| +
|
| +class MediaStreamAudioSource::AudioTee : public MediaStreamAudioTrack {
|
| + public:
|
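| + // The MediaStreamAudioTrack base constructor takes a flag indicating
|
| + // whether the track is local, hence the negation of |source->is_remote_|.
|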
| + explicit AudioTee(base::WeakPtr<MediaStreamAudioSource> source)
|
| + : MediaStreamAudioTrack(!source->is_remote_),
|
| + params_(source->params_),
|
| + source_(source),
|
| + is_enabled_(true) {}
|
| +
|
| + ~AudioTee() final { Stop(); }
|
| +
|
| + void AddSink(MediaStreamAudioSink* sink) final {
|
| + DCHECK(thread_checker_.CalledOnValidThread());
|
| + DCHECK(sink);
|
| + {
|
| + base::AutoLock auto_lock(lock_);
|
| + DCHECK(std::find(sinks_.begin(), sinks_.end(), sink) == sinks_.end());
|
| + DCHECK(std::find(added_sinks_.begin(), added_sinks_.end(), sink) ==
|
| + added_sinks_.end());
|
| + added_sinks_.push_back(sink);
|
| + }
|
| + sink->OnReadyStateChanged(blink::WebMediaStreamSource::ReadyStateLive);
|
| + }
|
| +
|
| + void RemoveSink(MediaStreamAudioSink* sink) final {
|
| + DCHECK(thread_checker_.CalledOnValidThread());
|
| + {
|
| + base::AutoLock auto_lock(lock_);
|
| + auto it = std::find(added_sinks_.begin(), added_sinks_.end(), sink);
|
| + if (it != added_sinks_.end()) {
|
| + added_sinks_.erase(it);
|
| + } else {
|
| + it = std::find(sinks_.begin(), sinks_.end(), sink);
|
| + if (it != sinks_.end())
|
| + sinks_.erase(it);
|
| + }
|
| + }
|
| + sink->OnReadyStateChanged(blink::WebMediaStreamSource::ReadyStateEnded);
|
| + }
|
| +
|
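| + // Tracks created via the default pipeline are not part of a WebRTC
|
| + // PeerConnection, so there is no webrtc::AudioTrackInterface to return.
|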
| + webrtc::AudioTrackInterface* GetAudioAdapter() final { return nullptr; }
|
| +
|
| + media::AudioParameters GetOutputFormat() const final { return params_; }
|
| +
|
| + void SetEnabled(bool enabled) final {
|
| + base::AutoLock auto_lock(lock_);
|
| + is_enabled_ = enabled;
|
| + }
|
| +
|
| + void Stop() final {
|
| + DCHECK(thread_checker_.CalledOnValidThread());
|
| +
|
| + if (source_)
|
| + source_->StopAudioDeliveryTo(this);
|
| +
|
| + std::vector<MediaStreamAudioSink*> zombies;
|
| + {
|
| + base::AutoLock auto_lock(lock_);
|
| + zombies.swap(sinks_);
|
| + zombies.insert(zombies.end(), added_sinks_.begin(), added_sinks_.end());
|
| + added_sinks_.clear();
|
| + }
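|
| + // Notify the sinks outside the lock: a sink's OnReadyStateChanged() could
|
| + // re-enter this object (e.g., call RemoveSink()), which must not deadlock.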
|
| + for (MediaStreamAudioSink* zombie : zombies)
|
| + zombie->OnReadyStateChanged(blink::WebMediaStreamSource::ReadyStateEnded);
|
| + }
|
| +
|
| + // Called by the MediaStreamAudioSource to deliver audio data to this track,
|
| + // which in turn delivers the audio to one or more MediaStreamAudioSinks.
|
| + void DeliverDataToSinks(const media::AudioBus& audio_bus,
|
| + base::TimeTicks reference_time) {
|
| + // Lock sink lists while this audio thread is manipulating the lists and
|
| + // invoking the OnData() callback for each sink.
|
| + base::AutoLock auto_lock(lock_);
|
| +
|
| + // If audio delivery is currently disabled, take no action.
|
| + if (!is_enabled_)
|
| + return;
|
| +
|
| + // For all newly-added sinks, call OnSetFormat() and move them to the active
|
| + // sink list.
|
| + if (!added_sinks_.empty()) {
|
| + for (MediaStreamAudioSink* sink : added_sinks_)
|
| + sink->OnSetFormat(params_);
|
| + sinks_.insert(sinks_.end(), added_sinks_.begin(), added_sinks_.end());
|
| + added_sinks_.clear();
|
| + }
|
| +
|
| + // Deliver the audio data to each sink.
|
| + for (MediaStreamAudioSink* sink : sinks_)
|
| + sink->OnData(audio_bus, reference_time);
|
| + }
|
| +
|
| + private:
|
| + // Source audio parameters.
|
| + const media::AudioParameters params_;
|
| +
|
| + // A weak reference is held to notify the source when this instance is
|
| + // stopped.
|
| + const base::WeakPtr<MediaStreamAudioSource> source_;
|
| +
|
| + // In debug builds, check that MediaStreamAudioTrack methods are being called
|
| + // on the same thread.
|
| + base::ThreadChecker thread_checker_;
|
| +
|
| + // Protects the sink lists below and the |is_enabled_| state against
|
| + // concurrent access by the main thread and the audio thread.
|
| + base::Lock lock_;
|
| +
|
| + // Sinks added via AddSink() that need to have an initial OnSetFormat() call
|
| + // on the audio thread before audio data is first delivered.
|
| + std::vector<MediaStreamAudioSink*> added_sinks_;
|
| +
|
| + // Sinks that have had OnSetFormat() called and are receiving audio data. On
|
| + // the audio thread, sinks are taken from |added_sinks_| and added to
|
| + // |sinks_|.
|
| + std::vector<MediaStreamAudioSink*> sinks_;
|
| +
|
| + // When false, delivery of audio data to the sinks is temporarily halted.
|
| + bool is_enabled_;
|
| +
|
| + DISALLOW_COPY_AND_ASSIGN(AudioTee);
|
| +};
|
| +
|
| +MediaStreamAudioSource::MediaStreamAudioSource()
|
| + : params_(
|
| + media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
|
| + media::CHANNEL_LAYOUT_STEREO,
|
| + media::AudioParameters::kAudioCDSampleRate,
|
| + 16, // Legacy parameter (data is always in 32-bit float format).
|
| + FramesPerBufferOrDefault(0,
|
| + media::AudioParameters::kAudioCDSampleRate)),
|
| + is_remote_(false),
|
| + consumer_render_frame_id_(-1),
|
| + current_state_(NULL_SOURCE_NOT_STARTED),
|
| + pc_factory_(nullptr),
|
| + weak_factory_(this) {}
|
| +
|
| MediaStreamAudioSource::MediaStreamAudioSource(
|
| - int render_frame_id,
|
| - const StreamDeviceInfo& device_info,
|
| - const SourceStoppedCallback& stop_callback,
|
| - PeerConnectionDependencyFactory* factory)
|
| - : render_frame_id_(render_frame_id), factory_(factory) {
|
| + int consumer_render_frame_id,
|
| + const StreamDeviceInfo& device_info)
|
| + : params_(GetParametersFromStreamDeviceInfo(device_info)),
|
| + is_remote_(false),
|
| + consumer_render_frame_id_(consumer_render_frame_id),
|
| + current_state_(INPUT_DEVICE_NOT_STARTED),
|
| + pc_factory_(nullptr),
|
| + weak_factory_(this) {
|
| + DCHECK(params_.IsValid());
|
| + DCHECK(RenderFrameImpl::FromRoutingID(consumer_render_frame_id_));
|
| SetDeviceInfo(device_info);
|
| - SetStopCallback(stop_callback);
|
| }
|
|
|
| -MediaStreamAudioSource::MediaStreamAudioSource()
|
| - : render_frame_id_(-1), factory_(NULL) {
|
| +MediaStreamAudioSource::MediaStreamAudioSource(
|
| + const scoped_refptr<media::AudioCapturerSource>& source,
|
| + int sample_rate,
|
| + media::ChannelLayout channel_layout,
|
| + int frames_per_buffer,
|
| + bool is_remote)
|
| + : params_(media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
|
| + channel_layout,
|
| + sample_rate,
|
| + 16, // Legacy parameter (data is always in 32-bit float format).
|
| + FramesPerBufferOrDefault(frames_per_buffer, sample_rate)),
|
| + is_remote_(is_remote),
|
| + consumer_render_frame_id_(-1),
|
| + current_state_(SOURCE_NOT_STARTED),
|
| + source_(source),
|
| + pc_factory_(nullptr),
|
| + weak_factory_(this) {
|
| + DCHECK(params_.IsValid());
|
| }
|
|
|
| -MediaStreamAudioSource::~MediaStreamAudioSource() {}
|
| +MediaStreamAudioSource::~MediaStreamAudioSource() {
|
| + DCHECK(thread_checker_.CalledOnValidThread());
|
| + if (current_state_ != STOPPED)
|
| + StopSource();
|
| + DCHECK_EQ(current_state_, STOPPED);
|
| +}
|
| +
|
| +bool MediaStreamAudioSource::ConnectToTrack(
|
| + const blink::WebMediaStreamTrack& track,
|
| + const blink::WebMediaConstraints& constraints) {
|
| + DCHECK(thread_checker_.CalledOnValidThread());
|
| + DCHECK(!track.isNull());
|
| +
|
| + // Sanity-check that there is not already a MediaStreamAudioTrack instance
|
| + // associated with |track|.
|
| + if (MediaStreamAudioTrack::GetTrack(track)) {
|
| + NOTREACHED()
|
| + << "Attempting to connect another source to a WebMediaStreamTrack.";
|
| + return false;
|
| + }
|
| +
|
| + if (!EnsureSourceIsStarted(constraints))
|
| + return false;
|
| +
|
| + // Delegate track creation/connection to the WebRTC audio pipeline, if
|
| + // necessary.
|
| + if (current_state_ == STARTED_WEBRTC_PIPELINE) {
|
| + DCHECK(pc_factory_);
|
| + pc_factory_->CreateLocalAudioTrack(track);
|
| + return true;
|
| + }
|
| +
|
| + // Create an AudioTee to deliver audio data directly from the |source_| to all
|
| + // sinks managed by the AudioTee. Pass ownership of it to the
|
| + // WebMediaStreamTrack.
|
| + DCHECK_EQ(current_state_, STARTED_DEFAULT_PIPELINE);
|
| + AudioTee* const tee = new AudioTee(weak_factory_.GetWeakPtr());
|
| + tee->SetEnabled(track.isEnabled());
|
| + blink::WebMediaStreamTrack mutable_track = track;
|
| + mutable_track.setExtraData(tee); // Takes ownership.
|
| + {
|
| + base::AutoLock auto_lock(lock_);
|
| + audio_tees_.push_back(tee);
|
| + }
|
| + return true;
|
| +}
|
|
|
| void MediaStreamAudioSource::DoStopSource() {
|
| + DCHECK(thread_checker_.CalledOnValidThread());
|
| +
|
| + if (current_state_ == STOPPED)
|
| + return;
|
| + current_state_ = STOPPED;
|
| +
|
| + if (source_)
|
| + source_->Stop();
|
| +
|
| if (audio_capturer_.get())
|
| audio_capturer_->Stop();
|
| }
|
|
|
| -void MediaStreamAudioSource::AddTrack(
|
| - const blink::WebMediaStreamTrack& track,
|
| - const blink::WebMediaConstraints& constraints,
|
| - const ConstraintsCallback& callback) {
|
| - // TODO(xians): Properly implement for audio sources.
|
| - if (!local_audio_source_.get()) {
|
| - if (!factory_->InitializeMediaStreamAudioSource(render_frame_id_,
|
| +bool MediaStreamAudioSource::EnsureSourceIsStarted(
|
| + const blink::WebMediaConstraints& constraints) {
|
| + DCHECK(thread_checker_.CalledOnValidThread());
|
| +
|
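| + // Each *_NOT_STARTED case below attempts to start the source and, on
|
| + // success, sets |current_state_| to one of the STARTED_* states; sources
|
| + // that have already started (or stopped) are left as-is.
|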
| + switch (current_state_) {
|
| + case NULL_SOURCE_NOT_STARTED:
|
| + if (pc_factory_) {
|
| + StartWebRtcPipeline(constraints);
|
| + } else {
|
| + StartDefaultPipeline();
|
| + }
|
| +
|
| + if (current_state_ == STOPPED) {
|
| + VLOG(1) << "Failed to start null source.";
|
| + } else {
|
| + VLOG(1) << "Started null source using "
|
| + << (current_state_ == STARTED_DEFAULT_PIPELINE ? "default"
|
| + : "WebRTC")
|
| + << " audio pipeline, audio parameters={"
|
| + << params_.AsHumanReadableString() << "}.";
|
| + }
|
| + break;
|
| +
|
| + case INPUT_DEVICE_NOT_STARTED:
|
| + // Sanity-check that the consuming RenderFrame still exists. This is
|
| + // required by AudioDeviceFactory.
|
| + if (!RenderFrameImpl::FromRoutingID(consumer_render_frame_id_))
|
| + break;
|
| +
|
| + // If there is no PeerConnectionFactory instance, or if the audio
|
| + // processing pipeline should not be used, create a direct route for
|
| + // unmodified audio data from the local source.
|
| + if (!pc_factory_ ||
|
| + IsContentCaptureMediaType(device_info().device.type) ||
|
| + !MediaStreamAudioProcessor::ShouldRouteAudioThroughProcessor(
|
| + constraints, device_info().device.input.effects)) {
|
| + source_ = AudioDeviceFactory::NewInputDevice(consumer_render_frame_id_);
|
| + StartDefaultPipeline();
|
| + } else if (pc_factory_) {
|
| + StartWebRtcPipeline(constraints);
|
| + } else {
|
| + NOTREACHED() << "Failed to determine which audio pipeline to use.";
|
| + StopSource();
|
| + }
|
| +
|
| + if (current_state_ == STOPPED) {
|
| + VLOG(1) << "Failed to start input device.";
|
| + } else {
|
| + VLOG(1) << "Started input device and using "
|
| + << (current_state_ == STARTED_DEFAULT_PIPELINE ? "default"
|
| + : "WebRTC")
|
| + << " audio pipeline, session_id=" << device_info().session_id
|
| + << ", audio parameters={" << params_.AsHumanReadableString()
|
| + << "}.";
|
| + }
|
| + break;
|
| +
|
| + case SOURCE_NOT_STARTED:
|
| + StartDefaultPipeline();
|
| + DCHECK_EQ(current_state_, STARTED_DEFAULT_PIPELINE);
|
| + VLOG(1) << "Started externally-provided "
|
| + << (is_remote_ ? "remote" : "local")
|
| + << " source and using default audio pipeline with audio "
|
| + "parameters={"
|
| + << params_.AsHumanReadableString() << "}.";
|
| + break;
|
| +
|
| + case STARTED_DEFAULT_PIPELINE:
|
| + case STARTED_WEBRTC_PIPELINE:
|
| + case STOPPED:
|
| + break;
|
| + }
|
| +
|
| + return current_state_ == STARTED_DEFAULT_PIPELINE ||
|
| + current_state_ == STARTED_WEBRTC_PIPELINE;
|
| +}
|
| +
|
| +void MediaStreamAudioSource::StartDefaultPipeline() {
|
| + DCHECK(thread_checker_.CalledOnValidThread());
|
| +
|
| + if (source_) {
|
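| + // Only a real audio input device has a session ID; for externally-provided
|
| + // sources, -1 (no session) is passed to Initialize().
|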
| + const int session_id = current_state_ == INPUT_DEVICE_NOT_STARTED
|
| + ? device_info().session_id
|
| + : -1;
|
| + source_->Initialize(params_, this, session_id);
|
| + source_->Start();
|
| + }
|
| +
|
| + current_state_ = STARTED_DEFAULT_PIPELINE;
|
| +}
|
| +
|
| +void MediaStreamAudioSource::StartWebRtcPipeline(
|
| + const blink::WebMediaConstraints& constraints) {
|
| + DCHECK(thread_checker_.CalledOnValidThread());
|
| + DCHECK(pc_factory_);
|
| +
|
| + // TODO(xians): Constraints should only apply to the track instead of the
|
| + // source. See TODO comments in
|
| + // ./webrtc/peer_connection_dependency_factory.cc.
|
| + if (pc_factory_->InitializeMediaStreamAudioSource(consumer_render_frame_id_,
|
| constraints, this)) {
|
| - // The source failed to start.
|
| - // UserMediaClientImpl rely on the |stop_callback| to be triggered when
|
| - // the last track is removed from the source. But in this case, the
|
| - // source is is not even started. So we need to fail both adding the
|
| - // track and trigger |stop_callback|.
|
| - callback.Run(this, MEDIA_DEVICE_TRACK_START_FAILURE, "");
|
| - StopSource();
|
| - return;
|
| - }
|
| + DCHECK(audio_capturer_.get());
|
| + current_state_ = STARTED_WEBRTC_PIPELINE;
|
| + } else {
|
| + // The source failed to start.
|
| + StopSource();
|
| + }
|
| +}
|
| +
|
| +void MediaStreamAudioSource::StopAudioDeliveryTo(
|
| + MediaStreamAudioSource::AudioTee* tee) {
|
| + DCHECK(thread_checker_.CalledOnValidThread());
|
| + DCHECK_NE(current_state_, STARTED_WEBRTC_PIPELINE);
|
| +
|
| + // Remove |tee| from the list of AudioTees. After the last AudioTee is
|
| + // removed, stop the source.
|
| + bool should_stop_source = false;
|
| + {
|
| + base::AutoLock auto_lock(lock_);
|
| + const auto it = std::find(audio_tees_.begin(), audio_tees_.end(), tee);
|
| + if (it != audio_tees_.end())
|
| + audio_tees_.erase(it);
|
| + should_stop_source = audio_tees_.empty();
|
| }
|
| + if (should_stop_source)
|
| + StopSource();
|
| +}
|
| +
|
| +void MediaStreamAudioSource::Capture(const media::AudioBus* audio_bus,
|
| + int audio_delay_milliseconds,
|
| + double volume,
|
| + bool key_pressed) {
|
| + // TODO(miu): Plumbing is needed to determine the actual capture timestamp
|
| + // of the audio, instead of just snapshotting TimeTicks::Now(), for proper
|
| + // audio/video sync. http://crbug.com/335335
|
| + const base::TimeTicks reference_time =
|
| + base::TimeTicks::Now() -
|
| + base::TimeDelta::FromMilliseconds(audio_delay_milliseconds);
|
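| + // Example (illustrative): with |audio_delay_milliseconds| == 20, the
|
| + // |reference_time| is 20 ms in the past, approximating when the first frame
|
| + // in |audio_bus| was captured.
|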
| +
|
| + // Deliver the audio data to each tee.
|
| + base::AutoLock auto_lock(lock_);
|
| + for (AudioTee* tee : audio_tees_) {
|
| + // As of this writing, |volume| is only used for an "automatic gain control"
|
| + // feature that does not apply here. Thus, assume the volume is always 1.0,
|
| + // which means the audio data should be passed through unmodified.
|
| + tee->DeliverDataToSinks(*audio_bus, reference_time);
|
| + }
|
| +}
|
|
|
| - factory_->CreateLocalAudioTrack(track);
|
| - callback.Run(this, MEDIA_DEVICE_OK, "");
|
| +void MediaStreamAudioSource::OnCaptureError(const std::string& message) {
|
| + // As of this writing, this method doesn't get called for anything useful,
|
| + // and all other implementors just log the message without disconnecting
|
| + // sinks or taking any other action. So, just log the error.
|
| + LOG(ERROR) << message;
|
| }
|
|
|
| } // namespace content
|
|
|