Index: content/renderer/media/webrtc/peer_connection_remote_audio_source.cc
diff --git a/content/renderer/media/webrtc/media_stream_remote_audio_track.cc b/content/renderer/media/webrtc/peer_connection_remote_audio_source.cc
similarity index 14%
rename from content/renderer/media/webrtc/media_stream_remote_audio_track.cc
rename to content/renderer/media/webrtc/peer_connection_remote_audio_source.cc
index e3940ab72b3bce7149897913d4af49745b673da0..fa6dead15da04fea756c7aa8a0f2995ea9518917 100644
--- a/content/renderer/media/webrtc/media_stream_remote_audio_track.cc
+++ b/content/renderer/media/webrtc/peer_connection_remote_audio_source.cc
@@ -2,150 +2,64 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "content/renderer/media/webrtc/media_stream_remote_audio_track.h" |
- |
-#include <stddef.h> |
- |
-#include <list> |
+#include "content/renderer/media/webrtc/peer_connection_remote_audio_source.h" |
#include "base/logging.h" |
-#include "content/public/renderer/media_stream_audio_sink.h" |
-#include "third_party/webrtc/api/mediastreaminterface.h" |
+#include "base/time/time.h" |
+#include "media/base/audio_bus.h" |
namespace content { |
-class MediaStreamRemoteAudioSource::AudioSink
-    : public webrtc::AudioTrackSinkInterface {
- public:
-  AudioSink() {
-  }
-  ~AudioSink() override {
-    DCHECK(sinks_.empty());
-  }
-
-  void Add(MediaStreamAudioSink* sink, MediaStreamAudioTrack* track,
-           bool enabled) {
-    DCHECK(thread_checker_.CalledOnValidThread());
-    SinkInfo info(sink, track, enabled);
-    base::AutoLock lock(lock_);
-    sinks_.push_back(info);
-  }
-
-  void Remove(MediaStreamAudioSink* sink, MediaStreamAudioTrack* track) {
-    DCHECK(thread_checker_.CalledOnValidThread());
-    base::AutoLock lock(lock_);
-    sinks_.remove_if([&sink, &track](const SinkInfo& info) {
-      return info.sink == sink && info.track == track;
-    });
-  }
-
-  void SetEnabled(MediaStreamAudioTrack* track, bool enabled) {
-    DCHECK(thread_checker_.CalledOnValidThread());
-    base::AutoLock lock(lock_);
-    for (SinkInfo& info : sinks_) {
-      if (info.track == track)
-        info.enabled = enabled;
-    }
-  }
+namespace {
+// Used as an identifier for the down-casters.
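+// Only the address of kClassIdentifier matters, so it simply points at
+// itself; GetClassIdentifier() below returns this process-unique value,
+// which makes the static From() down-cast safe.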
+void* const kClassIdentifier = const_cast<void**>(&kClassIdentifier);
+}  // namespace
 
-  void RemoveAll(MediaStreamAudioTrack* track) {
-    base::AutoLock lock(lock_);
-    sinks_.remove_if([&track](const SinkInfo& info) {
-      return info.track == track;
-    });
-  }
-
-  bool IsNeeded() const {
-    DCHECK(thread_checker_.CalledOnValidThread());
-    return !sinks_.empty();
-  }
-
- private:
-  void OnData(const void* audio_data, int bits_per_sample, int sample_rate,
-              size_t number_of_channels, size_t number_of_frames) override {
-    if (!audio_bus_ ||
-        static_cast<size_t>(audio_bus_->channels()) != number_of_channels ||
-        static_cast<size_t>(audio_bus_->frames()) != number_of_frames) {
-      audio_bus_ = media::AudioBus::Create(number_of_channels,
-                                           number_of_frames);
-    }
-
-    audio_bus_->FromInterleaved(audio_data, number_of_frames,
-                                bits_per_sample / 8);
-
-    bool format_changed = false;
-    if (params_.format() != media::AudioParameters::AUDIO_PCM_LOW_LATENCY ||
-        static_cast<size_t>(params_.channels()) != number_of_channels ||
-        params_.sample_rate() != sample_rate ||
-        static_cast<size_t>(params_.frames_per_buffer()) != number_of_frames) {
-      params_ = media::AudioParameters(
-          media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
-          media::GuessChannelLayout(number_of_channels),
-          sample_rate, 16, number_of_frames);
-      format_changed = true;
-    }
-
-    // TODO(tommi): We should get the timestamp from WebRTC.
-    base::TimeTicks estimated_capture_time(base::TimeTicks::Now());
-
-    base::AutoLock lock(lock_);
-    for (const SinkInfo& info : sinks_) {
-      if (info.enabled) {
-        if (format_changed)
-          info.sink->OnSetFormat(params_);
-        info.sink->OnData(*audio_bus_.get(), estimated_capture_time);
-      }
-    }
-  }
-
-  mutable base::Lock lock_;
-  struct SinkInfo {
-    SinkInfo(MediaStreamAudioSink* sink, MediaStreamAudioTrack* track,
-             bool enabled) : sink(sink), track(track), enabled(enabled) {}
-    MediaStreamAudioSink* sink;
-    MediaStreamAudioTrack* track;
-    bool enabled;
-  };
-  std::list<SinkInfo> sinks_;
-  base::ThreadChecker thread_checker_;
-  media::AudioParameters params_;  // Only used on the callback thread.
-  std::unique_ptr<media::AudioBus>
-      audio_bus_;  // Only used on the callback thread.
-};
-
-MediaStreamRemoteAudioTrack::MediaStreamRemoteAudioTrack(
-    const blink::WebMediaStreamSource& source, bool enabled)
-    : MediaStreamAudioTrack(false), source_(source), enabled_(enabled) {
-  DCHECK(source.getExtraData());  // Make sure the source has a native source.
+PeerConnectionRemoteAudioTrack::PeerConnectionRemoteAudioTrack(
+    scoped_refptr<webrtc::AudioTrackInterface> track_interface)
+    : MediaStreamAudioTrack(false /* is_local_track */),
+      track_interface_(std::move(track_interface)) {
+  DVLOG(1)
+      << "PeerConnectionRemoteAudioTrack::PeerConnectionRemoteAudioTrack()";
 }
 
-MediaStreamRemoteAudioTrack::~MediaStreamRemoteAudioTrack() {
-  DCHECK(main_render_thread_checker_.CalledOnValidThread());
+PeerConnectionRemoteAudioTrack::~PeerConnectionRemoteAudioTrack() {
+  DVLOG(1)
+      << "PeerConnectionRemoteAudioTrack::~PeerConnectionRemoteAudioTrack()";
   // Ensure the track is stopped.
   MediaStreamAudioTrack::Stop();
 }
 
-void MediaStreamRemoteAudioTrack::SetEnabled(bool enabled) {
-  DCHECK(main_render_thread_checker_.CalledOnValidThread());
+// static
+PeerConnectionRemoteAudioTrack* PeerConnectionRemoteAudioTrack::From(
+    MediaStreamAudioTrack* track) {
+  if (track && track->GetClassIdentifier() == kClassIdentifier)
+    return static_cast<PeerConnectionRemoteAudioTrack*>(track);
+  return nullptr;
+}
+
+void PeerConnectionRemoteAudioTrack::SetEnabled(bool enabled) {
+  DCHECK(thread_checker_.CalledOnValidThread());
   // This affects the shared state of the source for whether or not it's a part
   // of the mixed audio that's rendered for remote tracks from WebRTC.
   // All tracks from the same source will share this state and thus can step
   // on each other's toes.
-  // This is also why we can't check the |enabled_| state for equality with
-  // |enabled| before setting the mixing enabled state. |enabled_| and the
-  // shared state might not be the same.
-  source()->SetEnabledForMixing(enabled);
+  // This is also why we can't check the enabled state for equality with
+  // |enabled| before setting the mixing enabled state. This track's enabled
+  // state and the shared state might not be the same.
+  track_interface_->set_enabled(enabled);
 
-  enabled_ = enabled;
-  source()->SetSinksEnabled(this, enabled);
+  MediaStreamAudioTrack::SetEnabled(enabled);
 }
 
-void MediaStreamRemoteAudioTrack::OnStop() {
-  DCHECK(main_render_thread_checker_.CalledOnValidThread());
-  DVLOG(1) << "MediaStreamRemoteAudioTrack::OnStop()";
+void* PeerConnectionRemoteAudioTrack::GetClassIdentifier() const {
+  return kClassIdentifier;
+}
 
-  source()->RemoveAll(this);
+void PeerConnectionRemoteAudioTrack::OnStop() {
+  DCHECK(thread_checker_.CalledOnValidThread());
+  DVLOG(1) << "PeerConnectionRemoteAudioTrack::OnStop()";
 
   // Stop means that a track should be stopped permanently. But
   // since there is no proper way of doing that on a remote track, we can
@@ -154,84 +68,95 @@ void MediaStreamRemoteAudioTrack::OnStop() {
   SetEnabled(false);
 }
 
-void MediaStreamRemoteAudioTrack::AddSink(MediaStreamAudioSink* sink) {
-  DCHECK(main_render_thread_checker_.CalledOnValidThread());
-  return source()->AddSink(sink, this, enabled_);
+PeerConnectionRemoteAudioSource::PeerConnectionRemoteAudioSource(
+    scoped_refptr<webrtc::AudioTrackInterface> track_interface)
+    : MediaStreamAudioSource(false /* is_local_source */),
+      track_interface_(std::move(track_interface)),
+      is_sink_of_peer_connection_(false) {
+  DCHECK(track_interface_);
+  DVLOG(1)
+      << "PeerConnectionRemoteAudioSource::PeerConnectionRemoteAudioSource()";
 }
 
-void MediaStreamRemoteAudioTrack::RemoveSink(MediaStreamAudioSink* sink) {
-  DCHECK(main_render_thread_checker_.CalledOnValidThread());
-  return source()->RemoveSink(sink, this);
+PeerConnectionRemoteAudioSource::~PeerConnectionRemoteAudioSource() {
+  DVLOG(1)
+      << "PeerConnectionRemoteAudioSource::~PeerConnectionRemoteAudioSource()";
+  EnsureSourceIsStopped();
 }
 
-media::AudioParameters MediaStreamRemoteAudioTrack::GetOutputFormat() const {
-  DCHECK(main_render_thread_checker_.CalledOnValidThread());
-  // This method is not implemented on purpose and should be removed.
-  // TODO(tommi): See comment for GetOutputFormat in MediaStreamAudioTrack.
-  NOTIMPLEMENTED();
-  return media::AudioParameters();
-}
-
-webrtc::AudioTrackInterface* MediaStreamRemoteAudioTrack::GetAudioAdapter() {
-  DCHECK(main_render_thread_checker_.CalledOnValidThread());
-  return source()->GetAudioAdapter();
-}
-
-MediaStreamRemoteAudioSource* MediaStreamRemoteAudioTrack::source() const {
-  return static_cast<MediaStreamRemoteAudioSource*>(source_.getExtraData());
-}
-
-MediaStreamRemoteAudioSource::MediaStreamRemoteAudioSource(
-    const scoped_refptr<webrtc::AudioTrackInterface>& track) : track_(track) {}
-
-MediaStreamRemoteAudioSource::~MediaStreamRemoteAudioSource() {
+std::unique_ptr<MediaStreamAudioTrack>
+PeerConnectionRemoteAudioSource::CreateMediaStreamAudioTrack(
+    const std::string& id) {
   DCHECK(thread_checker_.CalledOnValidThread());
+  return std::unique_ptr<MediaStreamAudioTrack>(
+      new PeerConnectionRemoteAudioTrack(track_interface_));
 }
 
-void MediaStreamRemoteAudioSource::SetEnabledForMixing(bool enabled) {
+bool PeerConnectionRemoteAudioSource::EnsureSourceIsStarted() {
   DCHECK(thread_checker_.CalledOnValidThread());
-  track_->set_enabled(enabled);
+  if (is_sink_of_peer_connection_)
+    return true;
+  VLOG(1) << "Starting PeerConnection remote audio source with id="
+          << track_interface_->id();
+  track_interface_->AddSink(this);
+  is_sink_of_peer_connection_ = true;
+  return true;
 }
 
-void MediaStreamRemoteAudioSource::AddSink(MediaStreamAudioSink* sink,
-                                           MediaStreamAudioTrack* track,
-                                           bool enabled) {
+void PeerConnectionRemoteAudioSource::EnsureSourceIsStopped() {
   DCHECK(thread_checker_.CalledOnValidThread());
-  if (!sink_) {
-    sink_.reset(new AudioSink());
-    track_->AddSink(sink_.get());
+  if (is_sink_of_peer_connection_) {
+    track_interface_->RemoveSink(this);
+    is_sink_of_peer_connection_ = false;
+    VLOG(1) << "Stopped PeerConnection remote audio source with id="
+            << track_interface_->id();
   }
-
-  sink_->Add(sink, track, enabled);
 }
 
-void MediaStreamRemoteAudioSource::RemoveSink(MediaStreamAudioSink* sink,
-                                              MediaStreamAudioTrack* track) {
-  DCHECK(thread_checker_.CalledOnValidThread());
-  DCHECK(sink_);
-
-  sink_->Remove(sink, track);
-
-  if (!sink_->IsNeeded()) {
-    track_->RemoveSink(sink_.get());
-    sink_.reset();
+void PeerConnectionRemoteAudioSource::OnData(const void* audio_data,
+                                             int bits_per_sample,
+                                             int sample_rate,
+                                             size_t number_of_channels,
+                                             size_t number_of_frames) {
+  // Debug builds: Note that this lock isn't meant to synchronize anything.
+  // Instead, it is being used as a run-time check to ensure there isn't
+  // already another thread executing this method. The reason we don't use
+  // base::ThreadChecker here is because we shouldn't be making assumptions
+  // about the private threading model of libjingle.
+#ifndef NDEBUG
+  const bool is_only_thread_here = single_audio_thread_guard_.Try();
+  DCHECK(is_only_thread_here);
+#endif
+
+  // TODO(tommi): We should get the timestamp from WebRTC.
+  base::TimeTicks playout_time(base::TimeTicks::Now());
+
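+  // Recreate the AudioBus only when the geometry (channel count or frame
+  // count) of the incoming buffer changes.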
+  if (!audio_bus_ ||
+      static_cast<size_t>(audio_bus_->channels()) != number_of_channels ||
+      static_cast<size_t>(audio_bus_->frames()) != number_of_frames) {
+    audio_bus_ = media::AudioBus::Create(number_of_channels, number_of_frames);
   }
-}
 
-void MediaStreamRemoteAudioSource::SetSinksEnabled(MediaStreamAudioTrack* track,
-                                                   bool enabled) {
-  if (sink_)
-    sink_->SetEnabled(track, enabled);
-}
+  audio_bus_->FromInterleaved(audio_data, number_of_frames,
+                              bits_per_sample / 8);
+
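+  // Push a new format to the tracks when the incoming audio's parameters
+  // change; they will in turn notify their sinks via OnSetFormat().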
+  media::AudioParameters params = MediaStreamAudioSource::GetAudioParameters();
+  if (!params.IsValid() ||
+      params.format() != media::AudioParameters::AUDIO_PCM_LOW_LATENCY ||
+      static_cast<size_t>(params.channels()) != number_of_channels ||
+      params.sample_rate() != sample_rate ||
+      static_cast<size_t>(params.frames_per_buffer()) != number_of_frames) {
+    MediaStreamAudioSource::SetFormat(
+        media::AudioParameters(media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
+                               media::GuessChannelLayout(number_of_channels),
+                               sample_rate, bits_per_sample, number_of_frames));
+  }
 
-void MediaStreamRemoteAudioSource::RemoveAll(MediaStreamAudioTrack* track) {
-  if (sink_)
-    sink_->RemoveAll(track);
-}
+  MediaStreamAudioSource::DeliverDataToTracks(*audio_bus_, playout_time);
 
-webrtc::AudioTrackInterface* MediaStreamRemoteAudioSource::GetAudioAdapter() {
-  DCHECK(thread_checker_.CalledOnValidThread());
-  return track_.get();
+#ifndef NDEBUG
+  single_audio_thread_guard_.Release();
+#endif
 }
 
 }  // namespace content