OLD | NEW |
(Empty) | |
| 1 // Copyright 2016 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. |
| 4 |
#include "remoting/protocol/webrtc_audio_source_adapter.h"

#include <stddef.h>
#include <stdint.h>

#include <memory>
#include <string>
#include <utility>
#include <vector>

#include "base/bind.h"
#include "base/logging.h"
#include "base/synchronization/lock.h"
#include "base/threading/thread_checker.h"
#include "remoting/proto/audio.pb.h"
#include "remoting/protocol/audio_source.h"
| 13 |
| 14 namespace remoting { |
| 15 namespace protocol { |
| 16 |
| 17 static const int kChannels = 2; |
| 18 static const int kBytesPerSample = 2; |
| 19 |
| 20 // Frame size expected by webrtc::AudioTrackSinkInterface. |
| 21 static constexpr base::TimeDelta kAudioFrameDuration = |
| 22 base::TimeDelta::FromMilliseconds(10); |
| 23 |
// Core of the adapter. Lives on the audio (capture) thread: all methods
// except AddSink()/RemoveSink() are checked against |thread_checker_|.
class WebrtcAudioSourceAdapter::Core {
 public:
  Core();
  ~Core();

  // Starts |audio_source| and begins receiving its packets via
  // OnAudioPacket().
  void Start(std::unique_ptr<AudioSource> audio_source);

  // While paused, incoming packets are dropped in OnAudioPacket().
  void Pause(bool pause);

  // Sink registration; callable from any thread (guarded by
  // |audio_sinks_lock_|).
  void AddSink(webrtc::AudioTrackSinkInterface* sink);
  void RemoveSink(webrtc::AudioTrackSinkInterface* sink);

 private:
  // Callback passed to AudioSource::Start(); slices packets into 10ms frames
  // and fans them out to |audio_sinks_|.
  void OnAudioPacket(std::unique_ptr<AudioPacket> packet);

  std::unique_ptr<AudioSource> audio_source_;

  // Set by Pause(); read on every packet.
  bool paused_ = false;

  // Sampling rate of the most recent packet; a rate change discards any
  // buffered partial frame.
  int sampling_rate_ = 0;

  // webrtc::AudioTrackSinkInterface expects to get audio in 10ms frames (see
  // kAudioFrameDuration). AudioSource may generate AudioPackets for time
  // intervals that are not multiple of 10ms. In that case the left-over samples
  // are kept in |partial_frame_| until the next AudioPacket is captured by the
  // AudioSource.
  std::vector<uint8_t> partial_frame_;

  // |audio_sinks_| is accessed both from the audio thread (fan-out) and from
  // arbitrary threads (Add/RemoveSink), hence the lock.
  base::ObserverList<webrtc::AudioTrackSinkInterface> audio_sinks_;
  base::Lock audio_sinks_lock_;

  base::ThreadChecker thread_checker_;
};
| 55 |
WebrtcAudioSourceAdapter::Core::Core() {
  // The Core is constructed on the adapter's thread but used on the audio
  // thread; detach the checker so it re-binds on the first checked call.
  thread_checker_.DetachFromThread();
}
| 59 |
| 60 WebrtcAudioSourceAdapter::Core::~Core() {} |
| 61 |
// Takes ownership of |audio_source| and subscribes to its packets.
void WebrtcAudioSourceAdapter::Core::Start(
    std::unique_ptr<AudioSource> audio_source) {
  DCHECK(thread_checker_.CalledOnValidThread());
  audio_source_ = std::move(audio_source);
  // Unretained() is safe as long as |audio_source_| stops invoking the
  // callback once destroyed — the source is owned by, and destroyed with,
  // this Core (deleted on this thread via DeleteSoon()).
  audio_source_->Start(
      base::Bind(&Core::OnAudioPacket, base::Unretained(this)));
}
| 69 |
// Suspends (pause=true) or resumes (pause=false) delivery; packets arriving
// while paused are dropped, not buffered.
void WebrtcAudioSourceAdapter::Core::Pause(bool pause) {
  DCHECK(thread_checker_.CalledOnValidThread());
  paused_ = pause;
}
| 74 |
// Registers |sink| to receive 10ms audio frames.
void WebrtcAudioSourceAdapter::Core::AddSink(
    webrtc::AudioTrackSinkInterface* sink) {
  // Can be called on any thread.
  base::AutoLock lock(audio_sinks_lock_);
  audio_sinks_.AddObserver(sink);
}
| 81 |
// Unregisters |sink|; it will not be called again after this returns (the
// lock is also held during fan-out in OnAudioPacket()).
void WebrtcAudioSourceAdapter::Core::RemoveSink(
    webrtc::AudioTrackSinkInterface* sink) {
  // Can be called on any thread.
  base::AutoLock lock(audio_sinks_lock_);
  audio_sinks_.RemoveObserver(sink);
}
| 88 |
// Splits |packet| into fixed-size 10ms frames and delivers each complete
// frame to every registered sink. Leftover bytes that don't fill a frame are
// buffered in |partial_frame_| until the next packet.
void WebrtcAudioSourceAdapter::Core::OnAudioPacket(
    std::unique_ptr<AudioPacket> packet) {
  DCHECK(thread_checker_.CalledOnValidThread());

  if (paused_)
    return;

  DCHECK_EQ(packet->channels(), kChannels);
  DCHECK_EQ(packet->bytes_per_sample(), kBytesPerSample);

  // A sampling-rate change invalidates buffered samples: the frame size
  // changes and old samples can't be joined with new ones.
  if (sampling_rate_ != packet->sampling_rate()) {
    sampling_rate_ = packet->sampling_rate();
    partial_frame_.clear();
  }

  // Samples (and bytes) in one kAudioFrameDuration frame at the current rate.
  size_t samples_per_frame =
      kAudioFrameDuration * sampling_rate_ / base::TimeDelta::FromSeconds(1);
  size_t bytes_per_frame = kBytesPerSample * kChannels * samples_per_frame;

  const std::string& data = packet->data(0);

  // Byte offset into |data| of the first not-yet-consumed sample.
  size_t position = 0;

  // Hold the lock for the whole fan-out so sinks can't be removed while they
  // are being notified.
  base::AutoLock lock(audio_sinks_lock_);

  // First try to complete the frame left over from the previous packet.
  if (!partial_frame_.empty()) {
    size_t bytes_to_append =
        std::min(bytes_per_frame - partial_frame_.size(), data.size());
    position += bytes_to_append;
    partial_frame_.insert(partial_frame_.end(), data.data(),
                          data.data() + bytes_to_append);
    if (partial_frame_.size() < bytes_per_frame) {
      // Still don't have full frame.
      return;
    }

    // Here |partial_frame_| always contains a full frame.
    DCHECK_EQ(partial_frame_.size(), bytes_per_frame);

    FOR_EACH_OBSERVER(webrtc::AudioTrackSinkInterface, audio_sinks_,
                      OnData(&partial_frame_.front(), kBytesPerSample * 8,
                             sampling_rate_, kChannels, samples_per_frame));
  }

  // Deliver every complete frame contained in the rest of |data|.
  while (position + bytes_per_frame <= data.size()) {
    FOR_EACH_OBSERVER(webrtc::AudioTrackSinkInterface, audio_sinks_,
                      OnData(data.data() + position, kBytesPerSample * 8,
                             sampling_rate_, kChannels, samples_per_frame));
    position += bytes_per_frame;
  }

  // Stash the tail (< bytes_per_frame bytes) for the next packet. This also
  // replaces the frame delivered from |partial_frame_| above.
  partial_frame_.assign(data.data() + position, data.data() + data.size());
}
| 142 |
| 143 WebrtcAudioSourceAdapter::WebrtcAudioSourceAdapter( |
| 144 scoped_refptr<base::SingleThreadTaskRunner> audio_task_runner) |
| 145 : audio_task_runner_(audio_task_runner), core_(new Core()) {} |
| 146 |
WebrtcAudioSourceAdapter::~WebrtcAudioSourceAdapter() {
  // The Core runs on the audio thread, so it must be destroyed there as well;
  // release ownership and let the task runner delete it asynchronously.
  audio_task_runner_->DeleteSoon(FROM_HERE, core_.release());
}
| 150 |
// Hands |audio_source| to the Core on the audio thread. Unretained() is safe
// because |core_| is deleted via DeleteSoon() on the same task runner, i.e.
// after all previously posted tasks have run.
void WebrtcAudioSourceAdapter::Start(
    std::unique_ptr<AudioSource> audio_source) {
  audio_task_runner_->PostTask(
      FROM_HERE, base::Bind(&Core::Start, base::Unretained(core_.get()),
                            base::Passed(&audio_source)));
}
| 157 |
// Forwards the pause state to the Core on the audio thread (see Start() for
// why Unretained() is safe here).
void WebrtcAudioSourceAdapter::Pause(bool pause) {
  audio_task_runner_->PostTask(
      FROM_HERE,
      base::Bind(&Core::Pause, base::Unretained(core_.get()), pause));
}
| 163 |
// The source is always reported as live; this adapter does not model source
// state transitions.
WebrtcAudioSourceAdapter::SourceState WebrtcAudioSourceAdapter::state() const {
  return kLive;
}
| 167 |
// This adapter wraps a locally captured AudioSource, never a remote one.
bool WebrtcAudioSourceAdapter::remote() const {
  return false;
}
| 171 |
| 172 void WebrtcAudioSourceAdapter::RegisterAudioObserver(AudioObserver* observer) {} |
| 173 |
// No-op counterpart of RegisterAudioObserver().
void WebrtcAudioSourceAdapter::UnregisterAudioObserver(
    AudioObserver* observer) {}
| 176 |
// Forwards directly to the Core; Core::AddSink() is thread-safe, so no task
// posting is needed here.
void WebrtcAudioSourceAdapter::AddSink(webrtc::AudioTrackSinkInterface* sink) {
  core_->AddSink(sink);
}
// Forwards directly to the Core; Core::RemoveSink() is thread-safe.
void WebrtcAudioSourceAdapter::RemoveSink(
    webrtc::AudioTrackSinkInterface* sink) {
  core_->RemoveSink(sink);
}
| 184 |
// No-op: state() never changes (always kLive), so there is nothing to notify
// observers about.
void WebrtcAudioSourceAdapter::RegisterObserver(
    webrtc::ObserverInterface* observer) {}
// No-op counterpart of RegisterObserver().
void WebrtcAudioSourceAdapter::UnregisterObserver(
    webrtc::ObserverInterface* observer) {}
| 189 |
| 190 } // namespace protocol |
| 191 } // namespace remoting |
OLD | NEW |