OLD | NEW |
---|---|
(Empty) | |
1 // Copyright 2016 The Chromium Authors. All rights reserved. | |
2 // Use of this source code is governed by a BSD-style license that can be | |
3 // found in the LICENSE file. | |
4 | |
5 #include "remoting/protocol/webrtc_audio_source_adapter.h" | |
6 | |
7 #include "base/bind.h" | |
8 #include "base/logging.h" | |
9 #include "base/synchronization/lock.h" | |
10 #include "base/threading/thread_checker.h" | |
11 #include "remoting/proto/audio.pb.h" | |
12 #include "remoting/protocol/audio_source.h" | |
13 | |
14 namespace remoting { | |
15 namespace protocol { | |
16 | |
17 static const int kChannels = 2; | |
18 static const int kBytesPerSample = 2; | |
19 | |
20 // Frame size expected by webrtc::AudioTrackSinkInterface. | |
21 static constexpr base::TimeDelta kAudioFrameDuration = | |
22 base::TimeDelta::FromMilliseconds(10); | |
23 | |
// Core of the adapter. It is created by WebrtcAudioSourceAdapter but then
// lives on the audio task runner: Start()/Pause()/OnAudioPacket() are
// DCHECK'd to run on one thread, while AddSink()/RemoveSink() may be called
// from any thread (they are guarded by |audio_sinks_lock_|). The adapter's
// destructor deletes Core on the audio task runner via DeleteSoon().
class WebrtcAudioSourceAdapter::Core {
 public:
  Core();
  ~Core();

  // Takes ownership of |audio_source| and starts it, receiving its packets
  // in OnAudioPacket().
  void Start(std::unique_ptr<AudioSource> audio_source);

  // Pauses or resumes delivery of captured audio to the sinks.
  void Pause(bool pause);

  // Sink registration; safe to call from any thread.
  void AddSink(webrtc::AudioTrackSinkInterface* sink);
  void RemoveSink(webrtc::AudioTrackSinkInterface* sink);

 private:
  // Callback passed to |audio_source_|. Splits incoming packets into 10ms
  // frames and delivers them to |audio_sinks_|.
  void OnAudioPacket(std::unique_ptr<AudioPacket> packet);

  std::unique_ptr<AudioSource> audio_source_;

  // When true, OnAudioPacket() drops incoming packets.
  bool paused_ = false;

  // Sampling rate of the most recent packet; a rate change discards
  // |partial_frame_|.
  int sampling_rate_ = 0;

  // webrtc::AudioTrackSinkInterface expects to get audio in 10ms frames (see
  // kAudioFrameDuration). AudioSource may generate AudioPackets for time
  // intervals that are not multiple of 10ms. In that case the left-over samples
  // are kept in |partial_frame_| until the next AudioPacket is captured by the
  // AudioSource.
  std::vector<uint8_t> partial_frame_;

  // |audio_sinks_lock_| guards |audio_sinks_| against concurrent
  // AddSink()/RemoveSink() and frame delivery in OnAudioPacket().
  base::ObserverList<webrtc::AudioTrackSinkInterface> audio_sinks_;
  base::Lock audio_sinks_lock_;

  base::ThreadChecker thread_checker_;
};
55 | |
WebrtcAudioSourceAdapter::Core::Core() {
  // Core is constructed on the adapter's thread but used on the audio task
  // runner; detach so the first DCHECK'd call re-binds the checker there.
  thread_checker_.DetachFromThread();
}
59 | |
60 WebrtcAudioSourceAdapter::Core::~Core() {} | |
61 | |
void WebrtcAudioSourceAdapter::Core::Start(
    std::unique_ptr<AudioSource> audio_source) {
  DCHECK(thread_checker_.CalledOnValidThread());
  audio_source_ = std::move(audio_source);
  // base::Unretained() is used because |audio_source_| is owned by this
  // object and destroyed together with it. NOTE(review): this assumes
  // AudioSource never invokes the callback after its own destruction —
  // confirm against the AudioSource contract.
  audio_source_->Start(
      base::Bind(&Core::OnAudioPacket, base::Unretained(this)));
}
69 | |
void WebrtcAudioSourceAdapter::Core::Pause(bool pause) {
  DCHECK(thread_checker_.CalledOnValidThread());
  // No locking needed: |paused_| is only accessed here and in
  // OnAudioPacket(), both on the same thread.
  paused_ = pause;
}
74 | |
void WebrtcAudioSourceAdapter::Core::AddSink(
    webrtc::AudioTrackSinkInterface* sink) {
  // Can be called on any thread: |audio_sinks_lock_| serializes access with
  // frame delivery in OnAudioPacket().
  base::AutoLock lock(audio_sinks_lock_);
  audio_sinks_.AddObserver(sink);
}
81 | |
void WebrtcAudioSourceAdapter::Core::RemoveSink(
    webrtc::AudioTrackSinkInterface* sink) {
  // Can be called on any thread: |audio_sinks_lock_| serializes access with
  // frame delivery in OnAudioPacket().
  base::AutoLock lock(audio_sinks_lock_);
  audio_sinks_.RemoveObserver(sink);
}
88 | |
89 void WebrtcAudioSourceAdapter::Core::OnAudioPacket( | |
90 std::unique_ptr<AudioPacket> packet) { | |
91 DCHECK(thread_checker_.CalledOnValidThread()); | |
92 | |
93 if (paused_) | |
94 return; | |
95 | |
96 DCHECK_EQ(packet->channels(), kChannels); | |
97 DCHECK_EQ(packet->bytes_per_sample(), kBytesPerSample); | |
98 | |
99 if (sampling_rate_ != packet->sampling_rate()) { | |
100 sampling_rate_ = packet->sampling_rate(); | |
101 partial_frame_.clear(); | |
102 } | |
103 | |
104 size_t samples_per_frame = | |
105 kAudioFrameDuration * sampling_rate_ / base::TimeDelta::FromSeconds(1); | |
106 size_t bytes_per_frame = kBytesPerSample * kChannels * samples_per_frame; | |
107 | |
108 const std::string& data = packet->data(0); | |
109 | |
110 size_t position = 0; | |
111 | |
112 base::AutoLock lock(audio_sinks_lock_); | |
113 | |
114 if (!partial_frame_.empty()) { | |
115 size_t bytes_to_append = | |
116 std::min(bytes_per_frame - partial_frame_.size(), data.size()); | |
117 position += bytes_to_append; | |
118 partial_frame_.insert(partial_frame_.end(), data.data(), | |
119 data.data() + bytes_to_append); | |
120 if (partial_frame_.size() < bytes_per_frame) { | |
121 // Still don't have full frame. | |
122 return; | |
123 } | |
Jamie
2016/10/05 22:08:11
Here, |partial_frame_| actually contains a complet
Sergey Ulanov
2016/10/08 05:52:47
Done.
| |
124 FOR_EACH_OBSERVER(webrtc::AudioTrackSinkInterface, audio_sinks_, | |
125 OnData(&partial_frame_.front(), kBytesPerSample * 8, | |
126 sampling_rate_, kChannels, samples_per_frame)); | |
127 } | |
128 | |
129 while (position + bytes_per_frame <= data.size()) { | |
130 FOR_EACH_OBSERVER(webrtc::AudioTrackSinkInterface, audio_sinks_, | |
131 OnData(data.data() + position, kBytesPerSample * 8, | |
132 sampling_rate_, kChannels, samples_per_frame)); | |
133 position += bytes_per_frame; | |
134 } | |
135 | |
136 partial_frame_.assign(data.data() + position, data.data() + data.size()); | |
137 } | |
138 | |
139 WebrtcAudioSourceAdapter::WebrtcAudioSourceAdapter( | |
140 scoped_refptr<base::SingleThreadTaskRunner> audio_task_runner) | |
141 : audio_task_runner_(audio_task_runner), core_(new Core()) {} | |
142 | |
WebrtcAudioSourceAdapter::~WebrtcAudioSourceAdapter() {
  // |core_| is used on the audio task runner, so it must also be destroyed
  // there: release ownership and let DeleteSoon() delete it after any
  // previously posted Core tasks have run.
  audio_task_runner_->DeleteSoon(FROM_HERE, core_.release());
}
146 | |
void WebrtcAudioSourceAdapter::Start(
    std::unique_ptr<AudioSource> audio_source) {
  // base::Unretained(core_.get()) is safe: |core_| is deleted via
  // DeleteSoon() on the same task runner, so this task always runs before
  // the deletion task.
  audio_task_runner_->PostTask(
      FROM_HERE, base::Bind(&Core::Start, base::Unretained(core_.get()),
                            base::Passed(&audio_source)));
}
153 | |
154 void WebrtcAudioSourceAdapter::Pause(bool pause) { | |
155 audio_task_runner_->PostTask( | |
156 FROM_HERE, | |
157 base::Bind(&Core::Pause, base::Unretained(core_.get()), pause)); | |
158 } | |
159 | |
WebrtcAudioSourceAdapter::SourceState WebrtcAudioSourceAdapter::state() const {
  // The source is always reported as live.
  return kLive;
}
163 | |
bool WebrtcAudioSourceAdapter::remote() const {
  // This is a local source, never a remote one.
  return false;
}
167 | |
// No-op: audio-level observer registrations are ignored by this source.
void WebrtcAudioSourceAdapter::RegisterAudioObserver(AudioObserver* observer) {}
169 | |
// No-op counterpart of RegisterAudioObserver().
void WebrtcAudioSourceAdapter::UnregisterAudioObserver(
    AudioObserver* observer) {}
172 | |
void WebrtcAudioSourceAdapter::AddSink(webrtc::AudioTrackSinkInterface* sink) {
  // Called directly (not posted): Core::AddSink() is thread-safe.
  core_->AddSink(sink);
}
void WebrtcAudioSourceAdapter::RemoveSink(
    webrtc::AudioTrackSinkInterface* sink) {
  // Called directly (not posted): Core::RemoveSink() is thread-safe.
  core_->RemoveSink(sink);
}
180 | |
// No-op: no state-change notifications are generated (state() is always
// kLive).
void WebrtcAudioSourceAdapter::RegisterObserver(
    webrtc::ObserverInterface* observer) {}
// No-op counterpart of RegisterObserver().
void WebrtcAudioSourceAdapter::UnregisterObserver(
    webrtc::ObserverInterface* observer) {}
185 | |
186 } // namespace protocol | |
187 } // namespace remoting | |
OLD | NEW |