| OLD | NEW |
| (Empty) |
| 1 // Copyright 2015 The Chromium Authors. All rights reserved. | |
| 2 // Use of this source code is governed by a BSD-style license that can be | |
| 3 // found in the LICENSE file. | |
| 4 | |
| 5 #include "content/renderer/media/webrtc/peer_connection_remote_audio_source.h" | |
| 6 | |
| 7 #include "base/logging.h" | |
| 8 #include "base/time/time.h" | |
| 9 #include "media/base/audio_bus.h" | |
| 10 | |
| 11 namespace content { | |
| 12 | |
namespace {
// Used as an identifier for the down-casters.
// The constant's value is its own address: a process-unique pointer, so
// comparing GetClassIdentifier() against it with pointer equality safely
// identifies PeerConnectionRemoteAudioTrack instances without RTTI.
void* const kClassIdentifier = const_cast<void**>(&kClassIdentifier);
}  // namespace
| 17 | |
| 18 PeerConnectionRemoteAudioTrack::PeerConnectionRemoteAudioTrack( | |
| 19 scoped_refptr<webrtc::AudioTrackInterface> track_interface) | |
| 20 : MediaStreamAudioTrack(false /* is_local_track */), | |
| 21 track_interface_(std::move(track_interface)) { | |
| 22 DVLOG(1) | |
| 23 << "PeerConnectionRemoteAudioTrack::PeerConnectionRemoteAudioTrack()"; | |
| 24 } | |
| 25 | |
| 26 PeerConnectionRemoteAudioTrack::~PeerConnectionRemoteAudioTrack() { | |
| 27 DVLOG(1) | |
| 28 << "PeerConnectionRemoteAudioTrack::~PeerConnectionRemoteAudioTrack()"; | |
| 29 // Ensure the track is stopped. | |
| 30 MediaStreamAudioTrack::Stop(); | |
| 31 } | |
| 32 | |
| 33 // static | |
| 34 PeerConnectionRemoteAudioTrack* PeerConnectionRemoteAudioTrack::From( | |
| 35 MediaStreamAudioTrack* track) { | |
| 36 if (track && track->GetClassIdentifier() == kClassIdentifier) | |
| 37 return static_cast<PeerConnectionRemoteAudioTrack*>(track); | |
| 38 return nullptr; | |
| 39 } | |
| 40 | |
| 41 void PeerConnectionRemoteAudioTrack::SetEnabled(bool enabled) { | |
| 42 DCHECK(thread_checker_.CalledOnValidThread()); | |
| 43 | |
| 44 // This affects the shared state of the source for whether or not it's a part | |
| 45 // of the mixed audio that's rendered for remote tracks from WebRTC. | |
| 46 // All tracks from the same source will share this state and thus can step | |
| 47 // on each other's toes. | |
| 48 // This is also why we can't check the enabled state for equality with | |
| 49 // |enabled| before setting the mixing enabled state. This track's enabled | |
| 50 // state and the shared state might not be the same. | |
| 51 track_interface_->set_enabled(enabled); | |
| 52 | |
| 53 MediaStreamAudioTrack::SetEnabled(enabled); | |
| 54 } | |
| 55 | |
// Returns the process-unique identifier used by From() to recognize this
// concrete type.
void* PeerConnectionRemoteAudioTrack::GetClassIdentifier() const {
  return kClassIdentifier;
}
| 59 | |
| 60 PeerConnectionRemoteAudioSource::PeerConnectionRemoteAudioSource( | |
| 61 scoped_refptr<webrtc::AudioTrackInterface> track_interface) | |
| 62 : MediaStreamAudioSource(false /* is_local_source */), | |
| 63 track_interface_(std::move(track_interface)), | |
| 64 is_sink_of_peer_connection_(false) { | |
| 65 DCHECK(track_interface_); | |
| 66 DVLOG(1) | |
| 67 << "PeerConnectionRemoteAudioSource::PeerConnectionRemoteAudioSource()"; | |
| 68 } | |
| 69 | |
| 70 PeerConnectionRemoteAudioSource::~PeerConnectionRemoteAudioSource() { | |
| 71 DVLOG(1) | |
| 72 << "PeerConnectionRemoteAudioSource::~PeerConnectionRemoteAudioSource()"; | |
| 73 EnsureSourceIsStopped(); | |
| 74 } | |
| 75 | |
| 76 std::unique_ptr<MediaStreamAudioTrack> | |
| 77 PeerConnectionRemoteAudioSource::CreateMediaStreamAudioTrack( | |
| 78 const std::string& id) { | |
| 79 DCHECK(thread_checker_.CalledOnValidThread()); | |
| 80 return std::unique_ptr<MediaStreamAudioTrack>( | |
| 81 new PeerConnectionRemoteAudioTrack(track_interface_)); | |
| 82 } | |
| 83 | |
| 84 bool PeerConnectionRemoteAudioSource::EnsureSourceIsStarted() { | |
| 85 DCHECK(thread_checker_.CalledOnValidThread()); | |
| 86 if (is_sink_of_peer_connection_) | |
| 87 return true; | |
| 88 VLOG(1) << "Starting PeerConnection remote audio source with id=" | |
| 89 << track_interface_->id(); | |
| 90 track_interface_->AddSink(this); | |
| 91 is_sink_of_peer_connection_ = true; | |
| 92 return true; | |
| 93 } | |
| 94 | |
| 95 void PeerConnectionRemoteAudioSource::EnsureSourceIsStopped() { | |
| 96 DCHECK(thread_checker_.CalledOnValidThread()); | |
| 97 if (is_sink_of_peer_connection_) { | |
| 98 track_interface_->RemoveSink(this); | |
| 99 is_sink_of_peer_connection_ = false; | |
| 100 VLOG(1) << "Stopped PeerConnection remote audio source with id=" | |
| 101 << track_interface_->id(); | |
| 102 } | |
| 103 } | |
| 104 | |
| 105 void PeerConnectionRemoteAudioSource::OnData(const void* audio_data, | |
| 106 int bits_per_sample, | |
| 107 int sample_rate, | |
| 108 size_t number_of_channels, | |
| 109 size_t number_of_frames) { | |
| 110 // Debug builds: Note that this lock isn't meant to synchronize anything. | |
| 111 // Instead, it is being used as a run-time check to ensure there isn't already | |
| 112 // another thread executing this method. The reason we don't use | |
| 113 // base::ThreadChecker here is because we shouldn't be making assumptions | |
| 114 // about the private threading model of libjingle. For example, it would be | |
| 115 // legitimate for libjingle to use a different thread to invoke this method | |
| 116 // whenever the audio format changes. | |
| 117 #ifndef NDEBUG | |
| 118 const bool is_only_thread_here = single_audio_thread_guard_.Try(); | |
| 119 DCHECK(is_only_thread_here); | |
| 120 #endif | |
| 121 | |
| 122 // TODO(tommi): We should get the timestamp from WebRTC. | |
| 123 base::TimeTicks playout_time(base::TimeTicks::Now()); | |
| 124 | |
| 125 if (!audio_bus_ || | |
| 126 static_cast<size_t>(audio_bus_->channels()) != number_of_channels || | |
| 127 static_cast<size_t>(audio_bus_->frames()) != number_of_frames) { | |
| 128 audio_bus_ = media::AudioBus::Create(number_of_channels, number_of_frames); | |
| 129 } | |
| 130 | |
| 131 audio_bus_->FromInterleaved(audio_data, number_of_frames, | |
| 132 bits_per_sample / 8); | |
| 133 | |
| 134 media::AudioParameters params = MediaStreamAudioSource::GetAudioParameters(); | |
| 135 if (!params.IsValid() || | |
| 136 params.format() != media::AudioParameters::AUDIO_PCM_LOW_LATENCY || | |
| 137 static_cast<size_t>(params.channels()) != number_of_channels || | |
| 138 params.sample_rate() != sample_rate || | |
| 139 static_cast<size_t>(params.frames_per_buffer()) != number_of_frames) { | |
| 140 MediaStreamAudioSource::SetFormat( | |
| 141 media::AudioParameters(media::AudioParameters::AUDIO_PCM_LOW_LATENCY, | |
| 142 media::GuessChannelLayout(number_of_channels), | |
| 143 sample_rate, bits_per_sample, number_of_frames)); | |
| 144 } | |
| 145 | |
| 146 MediaStreamAudioSource::DeliverDataToTracks(*audio_bus_, playout_time); | |
| 147 | |
| 148 #ifndef NDEBUG | |
| 149 single_audio_thread_guard_.Release(); | |
| 150 #endif | |
| 151 } | |
| 152 | |
| 153 } // namespace content | |
| OLD | NEW |