Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(51)

Side by Side Diff: content/renderer/media/webrtc/processed_local_audio_source.cc

Issue 1834323002: MediaStream audio: Refactor 3 separate "glue" implementations into one. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Reworked unit tests around structural changes, and added exhaustive media_stream_audio_unittest.cc. Created 4 years, 7 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "content/renderer/media/webrtc_audio_capturer.h" 5 #include "content/renderer/media/webrtc/processed_local_audio_source.h"
6 6
7 #include "base/bind.h"
8 #include "base/logging.h" 7 #include "base/logging.h"
9 #include "base/macros.h"
10 #include "base/metrics/histogram.h" 8 #include "base/metrics/histogram.h"
11 #include "base/strings/string_util.h"
12 #include "base/strings/stringprintf.h" 9 #include "base/strings/stringprintf.h"
13 #include "build/build_config.h"
14 #include "content/child/child_process.h"
15 #include "content/renderer/media/audio_device_factory.h" 10 #include "content/renderer/media/audio_device_factory.h"
16 #include "content/renderer/media/media_stream_audio_processor.h"
17 #include "content/renderer/media/media_stream_audio_processor_options.h" 11 #include "content/renderer/media/media_stream_audio_processor_options.h"
18 #include "content/renderer/media/media_stream_audio_source.h"
19 #include "content/renderer/media/media_stream_constraints_util.h" 12 #include "content/renderer/media/media_stream_constraints_util.h"
13 #include "content/renderer/media/webrtc/peer_connection_dependency_factory.h"
20 #include "content/renderer/media/webrtc_audio_device_impl.h" 14 #include "content/renderer/media/webrtc_audio_device_impl.h"
21 #include "content/renderer/media/webrtc_local_audio_track.h"
22 #include "content/renderer/media/webrtc_logging.h" 15 #include "content/renderer/media/webrtc_logging.h"
16 #include "content/renderer/render_frame_impl.h"
23 #include "media/audio/sample_rates.h" 17 #include "media/audio/sample_rates.h"
18 #include "media/base/channel_layout.h"
19 #include "third_party/webrtc/api/mediaconstraintsinterface.h"
20 #include "third_party/webrtc/media/base/mediachannel.h"
24 21
25 namespace content { 22 namespace content {
26 23
27 // Reference counted container of WebRtcLocalAudioTrack delegate. 24 namespace {
28 // TODO(xians): Switch to MediaStreamAudioSinkOwner. 25 // Used as an identifier for ProcessedLocalAudioSource::From().
29 class WebRtcAudioCapturer::TrackOwner 26 void* const kClassIdentifier = const_cast<void**>(&kClassIdentifier);
30 : public base::RefCountedThreadSafe<WebRtcAudioCapturer::TrackOwner> { 27 } // namespace
31 public:
32 explicit TrackOwner(WebRtcLocalAudioTrack* track)
33 : delegate_(track) {}
34 28
35 void Capture(const media::AudioBus& audio_bus, 29 ProcessedLocalAudioSource::ProcessedLocalAudioSource(
36 base::TimeTicks estimated_capture_time) { 30 int consumer_render_frame_id,
37 base::AutoLock lock(lock_); 31 const StreamDeviceInfo& device_info,
38 if (delegate_) { 32 PeerConnectionDependencyFactory* factory)
39 delegate_->Capture(audio_bus, estimated_capture_time); 33 : MediaStreamAudioSource(true /* is_local_source */),
40 } 34 consumer_render_frame_id_(consumer_render_frame_id),
35 pc_factory_(factory),
36 volume_(0),
37 allow_invalid_render_frame_id_for_testing_(false) {
38 DCHECK(pc_factory_);
39 DVLOG(1) << "ProcessedLocalAudioSource::ProcessedLocalAudioSource()";
40 MediaStreamSource::SetDeviceInfo(device_info);
41 }
42
43 ProcessedLocalAudioSource::~ProcessedLocalAudioSource() {
44 DVLOG(1) << "ProcessedLocalAudioSource::~ProcessedLocalAudioSource()";
45 EnsureSourceIsStopped();
46 }
47
48 // static
49 ProcessedLocalAudioSource* ProcessedLocalAudioSource::From(
50 MediaStreamAudioSource* source) {
51 if (source && source->GetClassIdentifier() == kClassIdentifier)
52 return static_cast<ProcessedLocalAudioSource*>(source);
53 return nullptr;
54 }
55
56 void ProcessedLocalAudioSource::SetSourceConstraints(
57 const blink::WebMediaConstraints& constraints) {
58 DCHECK(thread_checker_.CalledOnValidThread());
59 DCHECK(!constraints.isNull());
60 DCHECK(!source_);
61 constraints_ = constraints;
62 }
63
64 void* ProcessedLocalAudioSource::GetClassIdentifier() const {
65 return kClassIdentifier;
66 }
67
68 bool ProcessedLocalAudioSource::EnsureSourceIsStarted() {
69 DCHECK(thread_checker_.CalledOnValidThread());
70
71 if (source_)
72 return true;
73
74 // Sanity-check that the consuming RenderFrame still exists. This is required
75 // to initialize the audio source.
76 if (!allow_invalid_render_frame_id_for_testing_ &&
77 !RenderFrameImpl::FromRoutingID(consumer_render_frame_id_)) {
78 WebRtcLogMessage("ProcessedLocalAudioSource::EnsureSourceIsStarted() fails "
79 " because the render frame does not exist.");
80 return false;
41 } 81 }
42 82
43 void OnSetFormat(const media::AudioParameters& params) {
44 base::AutoLock lock(lock_);
45 if (delegate_)
46 delegate_->OnSetFormat(params);
47 }
48
49 void Reset() {
50 base::AutoLock lock(lock_);
51 delegate_ = NULL;
52 }
53
54 void Stop() {
55 base::AutoLock lock(lock_);
56 DCHECK(delegate_);
57
58 // This can be reentrant so reset |delegate_| before calling out.
59 WebRtcLocalAudioTrack* temp = delegate_;
60 delegate_ = NULL;
61 temp->Stop();
62 }
63
64 // Wrapper which allows the use of std::find_if() when adding and removing
65 // tracks to/from the list.
66 struct TrackWrapper {
67 explicit TrackWrapper(WebRtcLocalAudioTrack* track) : track_(track) {}
68 bool operator()(
69 const scoped_refptr<WebRtcAudioCapturer::TrackOwner>& owner) const {
70 return owner->IsEqual(track_);
71 }
72 WebRtcLocalAudioTrack* track_;
73 };
74
75 protected:
76 virtual ~TrackOwner() {}
77
78 private:
79 friend class base::RefCountedThreadSafe<WebRtcAudioCapturer::TrackOwner>;
80
81 bool IsEqual(const WebRtcLocalAudioTrack* other) const {
82 base::AutoLock lock(lock_);
83 return (other == delegate_);
84 }
85
86 // Do NOT reference count the |delegate_| to avoid cyclic reference counting.
87 WebRtcLocalAudioTrack* delegate_;
88 mutable base::Lock lock_;
89
90 DISALLOW_COPY_AND_ASSIGN(TrackOwner);
91 };
92
93 // static
94 std::unique_ptr<WebRtcAudioCapturer> WebRtcAudioCapturer::CreateCapturer(
95 int render_frame_id,
96 const StreamDeviceInfo& device_info,
97 const blink::WebMediaConstraints& constraints,
98 WebRtcAudioDeviceImpl* audio_device,
99 MediaStreamAudioSource* audio_source) {
100 std::unique_ptr<WebRtcAudioCapturer> capturer(new WebRtcAudioCapturer(
101 render_frame_id, device_info, constraints, audio_device, audio_source));
102 if (capturer->Initialize())
103 return capturer;
104
105 return NULL;
106 }
107
108 bool WebRtcAudioCapturer::Initialize() {
109 DCHECK(thread_checker_.CalledOnValidThread());
110 DVLOG(1) << "WebRtcAudioCapturer::Initialize()";
111 WebRtcLogMessage(base::StringPrintf( 83 WebRtcLogMessage(base::StringPrintf(
112 "WAC::Initialize. render_frame_id=%d" 84 "ProcessedLocalAudioSource::EnsureSourceIsStarted. render_frame_id=%d"
113 ", channel_layout=%d, sample_rate=%d, buffer_size=%d" 85 ", channel_layout=%d, sample_rate=%d, buffer_size=%d"
114 ", session_id=%d, paired_output_sample_rate=%d" 86 ", session_id=%d, paired_output_sample_rate=%d"
115 ", paired_output_frames_per_buffer=%d, effects=%d. ", 87 ", paired_output_frames_per_buffer=%d, effects=%d. ",
116 render_frame_id_, device_info_.device.input.channel_layout, 88 consumer_render_frame_id_, device_info().device.input.channel_layout,
117 device_info_.device.input.sample_rate, 89 device_info().device.input.sample_rate,
118 device_info_.device.input.frames_per_buffer, device_info_.session_id, 90 device_info().device.input.frames_per_buffer, device_info().session_id,
119 device_info_.device.matched_output.sample_rate, 91 device_info().device.matched_output.sample_rate,
120 device_info_.device.matched_output.frames_per_buffer, 92 device_info().device.matched_output.frames_per_buffer,
121 device_info_.device.input.effects)); 93 device_info().device.input.effects));
122 94
123 if (render_frame_id_ == -1) { 95 // Sanity-check that the constraints, plus the additional input effects are
124 // Return true here to allow injecting a new source via 96 // valid when combined.
125 // SetCapturerSourceForTesting() at a later state. 97 const MediaAudioConstraints audio_constraints(
126 return true; 98 constraints_, device_info().device.input.effects);
99 if (!audio_constraints.IsValid()) {
100 WebRtcLogMessage("ProcessedLocalAudioSource::EnsureSourceIsStarted() fails "
101 " because MediaAudioConstraints are not valid.");
102 return false;
127 } 103 }
128 104
129 MediaAudioConstraints audio_constraints(constraints_, 105 // Build an AudioOptions by applying relevant constraints to it, and then use
130 device_info_.device.input.effects); 106 // it to create a webrtc::AudioSourceInterface instance.
131 if (!audio_constraints.IsValid()) 107 cricket::AudioOptions rtc_options;
108 rtc_options.echo_cancellation = ConstraintToOptional(
109 constraints_, &blink::WebMediaTrackConstraintSet::echoCancellation);
110 rtc_options.delay_agnostic_aec = ConstraintToOptional(
111 constraints_, &blink::WebMediaTrackConstraintSet::googDAEchoCancellation);
112 rtc_options.auto_gain_control = ConstraintToOptional(
113 constraints_, &blink::WebMediaTrackConstraintSet::googAutoGainControl);
114 rtc_options.experimental_agc = ConstraintToOptional(
115 constraints_,
116 &blink::WebMediaTrackConstraintSet::googExperimentalAutoGainControl);
117 rtc_options.noise_suppression = ConstraintToOptional(
118 constraints_, &blink::WebMediaTrackConstraintSet::googNoiseSuppression);
119 rtc_options.experimental_ns = ConstraintToOptional(
120 constraints_,
121 &blink::WebMediaTrackConstraintSet::googExperimentalNoiseSuppression);
122 rtc_options.highpass_filter = ConstraintToOptional(
123 constraints_, &blink::WebMediaTrackConstraintSet::googHighpassFilter);
124 rtc_options.typing_detection = ConstraintToOptional(
125 constraints_,
126 &blink::WebMediaTrackConstraintSet::googTypingNoiseDetection);
127 rtc_options.stereo_swapping = ConstraintToOptional(
128 constraints_, &blink::WebMediaTrackConstraintSet::googAudioMirroring);
129 MediaAudioConstraints::ApplyFixedAudioConstraints(&rtc_options);
130 if (device_info().device.input.effects &
131 media::AudioParameters::ECHO_CANCELLER) {
132 // TODO(hta): Figure out if we should be looking at echoCancellation.
133 // Previous code had googEchoCancellation only.
134 const blink::BooleanConstraint& echoCancellation =
135 constraints_.basic().googEchoCancellation;
136 if (echoCancellation.hasExact() && !echoCancellation.exact()) {
137 StreamDeviceInfo modified_device_info(device_info());
138 modified_device_info.device.input.effects &=
139 ~media::AudioParameters::ECHO_CANCELLER;
140 SetDeviceInfo(modified_device_info);
141 }
142 rtc_options.echo_cancellation = rtc::Optional<bool>(false);
143 }
144 rtc_source_ = pc_factory_->CreateLocalAudioSource(rtc_options);
145 if (rtc_source_->state() != webrtc::MediaSourceInterface::kLive) {
146 WebRtcLogMessage("ProcessedLocalAudioSource::EnsureSourceIsStarted() fails "
147 " because the rtc LocalAudioSource is not live.");
132 return false; 148 return false;
149 }
133 150
134 media::ChannelLayout channel_layout = static_cast<media::ChannelLayout>( 151 // Create the MediaStreamAudioProcessor, bound to the WebRTC audio device
135 device_info_.device.input.channel_layout); 152 // module.
153 WebRtcAudioDeviceImpl* const rtc_audio_device =
154 pc_factory_->GetWebRtcAudioDevice();
155 if (!rtc_audio_device) {
156 WebRtcLogMessage("ProcessedLocalAudioSource::EnsureSourceIsStarted() fails "
157 " because there is no WebRtcAudioDeviceImpl instance.");
158 return false;
159 }
160 audio_processor_ = new rtc::RefCountedObject<MediaStreamAudioProcessor>(
161 constraints_, device_info().device.input, rtc_audio_device);
136 162
137 // If KEYBOARD_MIC effect is set, change the layout to the corresponding 163 // If KEYBOARD_MIC effect is set, change the layout to the corresponding
138 // layout that includes the keyboard mic. 164 // layout that includes the keyboard mic.
139 if ((device_info_.device.input.effects & 165 media::ChannelLayout channel_layout = static_cast<media::ChannelLayout>(
166 device_info().device.input.channel_layout);
167 if ((device_info().device.input.effects &
140 media::AudioParameters::KEYBOARD_MIC) && 168 media::AudioParameters::KEYBOARD_MIC) &&
141 audio_constraints.GetGoogExperimentalNoiseSuppression()) { 169 audio_constraints.GetGoogExperimentalNoiseSuppression()) {
142 if (channel_layout == media::CHANNEL_LAYOUT_STEREO) { 170 if (channel_layout == media::CHANNEL_LAYOUT_STEREO) {
143 channel_layout = media::CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC; 171 channel_layout = media::CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC;
144 DVLOG(1) << "Changed stereo layout to stereo + keyboard mic layout due " 172 DVLOG(1) << "Changed stereo layout to stereo + keyboard mic layout due "
145 << "to KEYBOARD_MIC effect."; 173 << "to KEYBOARD_MIC effect.";
146 } else { 174 } else {
147 DVLOG(1) << "KEYBOARD_MIC effect ignored, not compatible with layout " 175 DVLOG(1) << "KEYBOARD_MIC effect ignored, not compatible with layout "
148 << channel_layout; 176 << channel_layout;
149 } 177 }
150 } 178 }
151 179
152 DVLOG(1) << "Audio input hardware channel layout: " << channel_layout; 180 DVLOG(1) << "Audio input hardware channel layout: " << channel_layout;
153 UMA_HISTOGRAM_ENUMERATION("WebRTC.AudioInputChannelLayout", 181 UMA_HISTOGRAM_ENUMERATION("WebRTC.AudioInputChannelLayout",
154 channel_layout, media::CHANNEL_LAYOUT_MAX + 1); 182 channel_layout, media::CHANNEL_LAYOUT_MAX + 1);
155 183
156 // Verify that the reported input channel configuration is supported. 184 // Verify that the reported input channel configuration is supported.
157 if (channel_layout != media::CHANNEL_LAYOUT_MONO && 185 if (channel_layout != media::CHANNEL_LAYOUT_MONO &&
158 channel_layout != media::CHANNEL_LAYOUT_STEREO && 186 channel_layout != media::CHANNEL_LAYOUT_STEREO &&
159 channel_layout != media::CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC) { 187 channel_layout != media::CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC) {
160 DLOG(ERROR) << channel_layout 188 WebRtcLogMessage(base::StringPrintf(
161 << " is not a supported input channel configuration."; 189 "ProcessedLocalAudioSource::EnsureSourceIsStarted() fails "
190 " because the input channel layout (%d) is not supported.",
191 static_cast<int>(channel_layout)));
162 return false; 192 return false;
163 } 193 }
164 194
165 DVLOG(1) << "Audio input hardware sample rate: " 195 DVLOG(1) << "Audio input hardware sample rate: "
166 << device_info_.device.input.sample_rate; 196 << device_info().device.input.sample_rate;
167 media::AudioSampleRate asr; 197 media::AudioSampleRate asr;
168 if (media::ToAudioSampleRate(device_info_.device.input.sample_rate, &asr)) { 198 if (media::ToAudioSampleRate(device_info().device.input.sample_rate, &asr)) {
169 UMA_HISTOGRAM_ENUMERATION( 199 UMA_HISTOGRAM_ENUMERATION(
170 "WebRTC.AudioInputSampleRate", asr, media::kAudioSampleRateMax + 1); 200 "WebRTC.AudioInputSampleRate", asr, media::kAudioSampleRateMax + 1);
171 } else { 201 } else {
172 UMA_HISTOGRAM_COUNTS("WebRTC.AudioInputSampleRateUnexpected", 202 UMA_HISTOGRAM_COUNTS("WebRTC.AudioInputSampleRateUnexpected",
173 device_info_.device.input.sample_rate); 203 device_info().device.input.sample_rate);
174 } 204 }
175 205
176 // Create and configure the default audio capturing source. 206 // Determine the audio format required of the AudioCapturerSource. Then, pass
177 SetCapturerSourceInternal( 207 // that to the |audio_processor_| and set the output format of this
178 AudioDeviceFactory::NewAudioCapturerSource(render_frame_id_), 208 // ProcessedLocalAudioSource to the processor's output format.
179 channel_layout, device_info_.device.input.sample_rate); 209 media::AudioParameters params(
210 media::AudioParameters::AUDIO_PCM_LOW_LATENCY, channel_layout,
211 device_info().device.input.sample_rate, 16,
212 GetBufferSize(device_info().device.input.sample_rate));
213 params.set_effects(device_info().device.input.effects);
214 DCHECK(params.IsValid());
215 audio_processor_->OnCaptureFormatChanged(params);
216 MediaStreamAudioSource::SetFormat(audio_processor_->OutputFormat());
180 217
181 // Add the capturer to the WebRtcAudioDeviceImpl since it needs some hardware 218 // Start the source.
182 // information from the capturer. 219 VLOG(1) << "Starting WebRTC audio source for consumption by render frame "
183 if (audio_device_) 220 << consumer_render_frame_id_ << " with input parameters={"
184 audio_device_->AddAudioCapturer(this); 221 << params.AsHumanReadableString() << "} and output parameters={"
222 << GetAudioParameters().AsHumanReadableString() << '}';
223 source_ =
224 AudioDeviceFactory::NewAudioCapturerSource(consumer_render_frame_id_);
225 source_->Initialize(params, this, device_info().session_id);
226 // We need to set the AGC control before starting the stream.
227 source_->SetAutomaticGainControl(true);
228 source_->Start();
229
230 // Register this source with the WebRtcAudioDeviceImpl.
231 rtc_audio_device->AddAudioCapturer(this);
185 232
186 return true; 233 return true;
187 } 234 }
188 235
189 WebRtcAudioCapturer::WebRtcAudioCapturer( 236 void ProcessedLocalAudioSource::EnsureSourceIsStopped() {
190 int render_frame_id, 237 DCHECK(thread_checker_.CalledOnValidThread());
191 const StreamDeviceInfo& device_info,
192 const blink::WebMediaConstraints& constraints,
193 WebRtcAudioDeviceImpl* audio_device,
194 MediaStreamAudioSource* audio_source)
195 : constraints_(constraints),
196 audio_processor_(new rtc::RefCountedObject<MediaStreamAudioProcessor>(
197 constraints,
198 device_info.device.input,
199 audio_device)),
200 running_(false),
201 render_frame_id_(render_frame_id),
202 device_info_(device_info),
203 volume_(0),
204 peer_connection_mode_(false),
205 audio_device_(audio_device),
206 audio_source_(audio_source) {
207 DVLOG(1) << "WebRtcAudioCapturer::WebRtcAudioCapturer()";
208 }
209 238
210 WebRtcAudioCapturer::~WebRtcAudioCapturer() { 239 if (!source_)
211 DCHECK(thread_checker_.CalledOnValidThread()); 240 return;
212 DCHECK(tracks_.IsEmpty());
213 DVLOG(1) << "WebRtcAudioCapturer::~WebRtcAudioCapturer()";
214 Stop();
215 }
216 241
217 void WebRtcAudioCapturer::AddTrack(WebRtcLocalAudioTrack* track) { 242 if (WebRtcAudioDeviceImpl* rtc_audio_device =
218 DCHECK(thread_checker_.CalledOnValidThread()); 243 pc_factory_->GetWebRtcAudioDevice()) {
219 DCHECK(track); 244 rtc_audio_device->RemoveAudioCapturer(this);
220 DVLOG(1) << "WebRtcAudioCapturer::AddTrack()";
221
222 track->SetLevel(level_calculator_.level());
223
224 // The track only grabs stats from the audio processor. Stats are only
225 // available if audio processing is turned on. Therefore, only provide the
226 // track a reference if audio processing is turned on.
227 if (audio_processor_->has_audio_processing())
228 track->SetAudioProcessor(audio_processor_);
229
230 {
231 base::AutoLock auto_lock(lock_);
232 // Verify that |track| is not already added to the list.
233 DCHECK(!tracks_.Contains(TrackOwner::TrackWrapper(track)));
234
235 // Add with a tag, so we remember to call OnSetFormat() on the new
236 // track.
237 scoped_refptr<TrackOwner> track_owner(new TrackOwner(track));
238 tracks_.AddAndTag(track_owner.get());
239 }
240 }
241
242 void WebRtcAudioCapturer::RemoveTrack(WebRtcLocalAudioTrack* track) {
243 DCHECK(thread_checker_.CalledOnValidThread());
244 DVLOG(1) << "WebRtcAudioCapturer::RemoveTrack()";
245 bool stop_source = false;
246 {
247 base::AutoLock auto_lock(lock_);
248
249 scoped_refptr<TrackOwner> removed_item =
250 tracks_.Remove(TrackOwner::TrackWrapper(track));
251
252 // Clear the delegate to ensure that no more capture callbacks will
253 // be sent to this sink. Also avoids a possible crash which can happen
254 // if this method is called while capturing is active.
255 if (removed_item.get()) {
256 removed_item->Reset();
257 stop_source = tracks_.IsEmpty();
258 }
259 }
260 if (stop_source) {
261 // Since WebRtcAudioCapturer does not inherit MediaStreamAudioSource,
262 // and instead MediaStreamAudioSource is composed of a WebRtcAudioCapturer,
263 // we have to call StopSource on the MediaStreamSource. This will call
264 // MediaStreamAudioSource::DoStopSource, which in turn calls
265 // WebRtcAudioCapturer::Stop();
266 audio_source_->StopSource();
267 }
268 }
269
270 void WebRtcAudioCapturer::SetCapturerSourceInternal(
271 const scoped_refptr<media::AudioCapturerSource>& source,
272 media::ChannelLayout channel_layout,
273 int sample_rate) {
274 DCHECK(thread_checker_.CalledOnValidThread());
275 DVLOG(1) << "SetCapturerSource(channel_layout=" << channel_layout << ","
276 << "sample_rate=" << sample_rate << ")";
277 scoped_refptr<media::AudioCapturerSource> old_source;
278 {
279 base::AutoLock auto_lock(lock_);
280 if (source_.get() == source.get())
281 return;
282
283 source_.swap(old_source);
284 source_ = source;
285
286 // Reset the flag to allow starting the new source.
287 running_ = false;
288 } 245 }
289 246
290 DVLOG(1) << "Switching to a new capture source."; 247 // Note: Stopping the source while holding the |volume_lock_| because the
291 if (old_source.get()) 248 // SetVolume() method needs to know whether |source_| is valid.
292 old_source->Stop();
293
294 // Dispatch the new parameters both to the sink(s) and to the new source,
295 // also apply the new |constraints|.
296 // The idea is to get rid of any dependency of the microphone parameters
297 // which would normally be used by default.
298 // bits_per_sample is always 16 for now.
299 media::AudioParameters params(media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
300 channel_layout, sample_rate, 16,
301 GetBufferSize(sample_rate));
302 params.set_effects(device_info_.device.input.effects);
303 DCHECK(params.IsValid());
304
305 { 249 {
306 base::AutoLock auto_lock(lock_); 250 base::AutoLock auto_lock(volume_lock_);
307 251 source_->Stop();
308 // Notify the |audio_processor_| of the new format. We're doing this while 252 source_ = nullptr;
309 // the lock is held only because the signaling thread might be calling
310 // GetInputFormat(). Simultaneous reads from the audio thread are NOT the
311 // concern here since the source is currently stopped (i.e., no audio
312 // capture calls can be executing).
313 audio_processor_->OnCaptureFormatChanged(params);
314
315 // Notify all tracks about the new format.
316 tracks_.TagAll();
317 } 253 }
318 254
319 if (source.get())
320 source->Initialize(params, this, device_info_.session_id);
321
322 Start();
323 }
324
325 void WebRtcAudioCapturer::EnablePeerConnectionMode() {
326 DCHECK(thread_checker_.CalledOnValidThread());
327 DVLOG(1) << "EnablePeerConnectionMode";
328 // Do nothing if the peer connection mode has been enabled.
329 if (peer_connection_mode_)
330 return;
331
332 peer_connection_mode_ = true;
333 int render_frame_id = -1;
334 media::AudioParameters input_params;
335 {
336 base::AutoLock auto_lock(lock_);
337 // Simply return if there is no existing source or the |render_frame_id_| is
338 // not valid.
339 if (!source_.get() || render_frame_id_ == -1)
340 return;
341
342 render_frame_id = render_frame_id_;
343 input_params = audio_processor_->InputFormat();
344 }
345
346 // Do nothing if the current buffer size is the WebRtc native buffer size.
347 if (GetBufferSize(input_params.sample_rate()) ==
348 input_params.frames_per_buffer()) {
349 return;
350 }
351
352 // Create a new audio stream as source which will open the hardware using
353 // WebRtc native buffer size.
354 SetCapturerSourceInternal(
355 AudioDeviceFactory::NewAudioCapturerSource(render_frame_id),
356 input_params.channel_layout(), input_params.sample_rate());
357 }
358
359 void WebRtcAudioCapturer::Start() {
360 DCHECK(thread_checker_.CalledOnValidThread());
361 DVLOG(1) << "WebRtcAudioCapturer::Start()";
362 base::AutoLock auto_lock(lock_);
363 if (running_ || !source_.get())
364 return;
365
366 // Start the data source, i.e., start capturing data from the current source.
367 // We need to set the AGC control before starting the stream.
368 source_->SetAutomaticGainControl(true);
369 source_->Start();
370 running_ = true;
371 }
372
373 void WebRtcAudioCapturer::Stop() {
374 DCHECK(thread_checker_.CalledOnValidThread());
375 DVLOG(1) << "WebRtcAudioCapturer::Stop()";
376 scoped_refptr<media::AudioCapturerSource> source;
377 TrackList::ItemList tracks;
378 {
379 base::AutoLock auto_lock(lock_);
380 if (!running_)
381 return;
382
383 source = source_;
384 tracks = tracks_.Items();
385 tracks_.Clear();
386 running_ = false;
387 }
388
389 // Remove the capturer object from the WebRtcAudioDeviceImpl.
390 if (audio_device_)
391 audio_device_->RemoveAudioCapturer(this);
392
393 for (TrackList::ItemList::const_iterator it = tracks.begin();
394 it != tracks.end();
395 ++it) {
396 (*it)->Stop();
397 }
398
399 if (source.get())
400 source->Stop();
401
402 // Stop the audio processor to avoid feeding render data into the processor. 255 // Stop the audio processor to avoid feeding render data into the processor.
403 audio_processor_->Stop(); 256 audio_processor_->Stop();
257
258 VLOG(1) << "Stopped WebRTC audio pipeline for consumption by render frame "
259 << consumer_render_frame_id_ << '.';
404 } 260 }
405 261
406 void WebRtcAudioCapturer::SetVolume(int volume) { 262 void ProcessedLocalAudioSource::SetVolume(int volume) {
407 DVLOG(1) << "WebRtcAudioCapturer::SetVolume()"; 263 DVLOG(1) << "ProcessedLocalAudioSource::SetVolume()";
408 DCHECK_LE(volume, MaxVolume()); 264 DCHECK_LE(volume, MaxVolume());
409 double normalized_volume = static_cast<double>(volume) / MaxVolume(); 265 double normalized_volume = static_cast<double>(volume) / MaxVolume();
410 base::AutoLock auto_lock(lock_); 266 base::AutoLock auto_lock(volume_lock_);
411 if (source_.get()) 267 if (source_)
412 source_->SetVolume(normalized_volume); 268 source_->SetVolume(normalized_volume);
413 } 269 }
414 270
415 int WebRtcAudioCapturer::Volume() const { 271 int ProcessedLocalAudioSource::Volume() const {
416 base::AutoLock auto_lock(lock_); 272 base::AutoLock auto_lock(volume_lock_);
417 return volume_; 273 return volume_;
418 } 274 }
419 275
420 int WebRtcAudioCapturer::MaxVolume() const { 276 int ProcessedLocalAudioSource::MaxVolume() const {
421 return WebRtcAudioDeviceImpl::kMaxVolumeLevel; 277 return WebRtcAudioDeviceImpl::kMaxVolumeLevel;
422 } 278 }
423 279
424 media::AudioParameters WebRtcAudioCapturer::GetOutputFormat() const { 280 void ProcessedLocalAudioSource::Capture(const media::AudioBus* audio_bus,
425 DCHECK(thread_checker_.CalledOnValidThread()); 281 int audio_delay_milliseconds,
426 return audio_processor_->OutputFormat(); 282 double volume,
427 } 283 bool key_pressed) {
428
429 void WebRtcAudioCapturer::Capture(const media::AudioBus* audio_source,
430 int audio_delay_milliseconds,
431 double volume,
432 bool key_pressed) {
433 // This callback is driven by AudioInputDevice::AudioThreadCallback if
434 // |source_| is AudioInputDevice, otherwise it is driven by client's
435 // CaptureCallback.
436 #if defined(OS_WIN) || defined(OS_MACOSX) 284 #if defined(OS_WIN) || defined(OS_MACOSX)
437 DCHECK_LE(volume, 1.0); 285 DCHECK_LE(volume, 1.0);
438 #elif (defined(OS_LINUX) && !defined(OS_CHROMEOS)) || defined(OS_OPENBSD) 286 #elif (defined(OS_LINUX) && !defined(OS_CHROMEOS)) || defined(OS_OPENBSD)
439 // We have a special situation on Linux where the microphone volume can be 287 // We have a special situation on Linux where the microphone volume can be
440 // "higher than maximum". The input volume slider in the sound preference 288 // "higher than maximum". The input volume slider in the sound preference
441 // allows the user to set a scaling that is higher than 100%. It means that 289 // allows the user to set a scaling that is higher than 100%. It means that
442 // even if the reported maximum levels is N, the actual microphone level can 290 // even if the reported maximum levels is N, the actual microphone level can
443 // go up to 1.5x*N and that corresponds to a normalized |volume| of 1.5x. 291 // go up to 1.5x*N and that corresponds to a normalized |volume| of 1.5x.
444 DCHECK_LE(volume, 1.6); 292 DCHECK_LE(volume, 1.6);
445 #endif 293 #endif
446 294
447 // TODO(miu): Plumbing is needed to determine the actual capture timestamp 295 // TODO(miu): Plumbing is needed to determine the actual capture timestamp
448 // of the audio, instead of just snapshotting TimeTicks::Now(), for proper 296 // of the audio, instead of just snapshotting TimeTicks::Now(), for proper
449 // audio/video sync. http://crbug.com/335335 297 // audio/video sync. http://crbug.com/335335
450 const base::TimeTicks reference_clock_snapshot = base::TimeTicks::Now(); 298 const base::TimeTicks reference_clock_snapshot = base::TimeTicks::Now();
451 299
452 TrackList::ItemList tracks; 300 // Map internal volume range of [0.0, 1.0] into [0, 255] used by AGC.
453 TrackList::ItemList tracks_to_notify_format; 301 // The volume can be higher than 255 on Linux, and it will be cropped to
454 int current_volume = 0; 302 // 255 since AGC does not allow values out of range.
303 int current_volume = static_cast<int>((volume * MaxVolume()) + 0.5);
455 { 304 {
456 base::AutoLock auto_lock(lock_); 305 base::AutoLock auto_lock(volume_lock_);
457 if (!running_) 306 volume_ = current_volume;
458 return;
459
460 // Map internal volume range of [0.0, 1.0] into [0, 255] used by AGC.
461 // The volume can be higher than 255 on Linux, and it will be cropped to
462 // 255 since AGC does not allow values out of range.
463 volume_ = static_cast<int>((volume * MaxVolume()) + 0.5);
464 current_volume = volume_ > MaxVolume() ? MaxVolume() : volume_;
465 tracks = tracks_.Items();
466 tracks_.RetrieveAndClearTags(&tracks_to_notify_format);
467 } 307 }
308 current_volume = volume_ > MaxVolume() ? MaxVolume() : volume_;
468 309
469 // Sanity-check the input audio format in debug builds. Then, notify the 310 // Sanity-check the input audio format in debug builds. Then, notify the
470 // tracks if the format has changed. 311 // tracks if the format has changed.
471 // 312 //
472 // Locking is not needed here to read the audio input/output parameters 313 // Locking is not needed here to read the audio input/output parameters
473 // because the audio processor format changes only occur while audio capture 314 // because the audio processor format changes only occur while audio capture
474 // is stopped. 315 // is stopped.
475 DCHECK(audio_processor_->InputFormat().IsValid()); 316 DCHECK(audio_processor_->InputFormat().IsValid());
476 DCHECK_EQ(audio_source->channels(), 317 DCHECK_EQ(audio_bus->channels(), audio_processor_->InputFormat().channels());
477 audio_processor_->InputFormat().channels()); 318 DCHECK_EQ(audio_bus->frames(),
478 DCHECK_EQ(audio_source->frames(),
479 audio_processor_->InputFormat().frames_per_buffer()); 319 audio_processor_->InputFormat().frames_per_buffer());
480 if (!tracks_to_notify_format.empty()) {
481 const media::AudioParameters& output_params =
482 audio_processor_->OutputFormat();
483 for (const auto& track : tracks_to_notify_format)
484 track->OnSetFormat(output_params);
485 }
486 320
487 // Figure out if the pre-processed data has any energy or not. This 321 // Figure out if the pre-processed data has any energy or not. This
488 // information will be passed to the level calculator to force it to report 322 // information will be passed to the level calculator to force it to report
489 // energy in case the post-processed data is zeroed by the audio processing. 323 // energy in case the post-processed data is zeroed by the audio processing.
490 const bool force_report_nonzero_energy = !audio_source->AreFramesZero(); 324 const bool force_report_nonzero_energy = !audio_bus->AreFramesZero();
491 325
492 // Push the data to the processor for processing. 326 // Push the data to the processor for processing.
493 audio_processor_->PushCaptureData( 327 audio_processor_->PushCaptureData(
494 *audio_source, 328 *audio_bus,
495 base::TimeDelta::FromMilliseconds(audio_delay_milliseconds)); 329 base::TimeDelta::FromMilliseconds(audio_delay_milliseconds));
496 330
497 // Process and consume the data in the processor until there is not enough 331 // Process and consume the data in the processor until there is not enough
498 // data in the processor. 332 // data in the processor.
499 media::AudioBus* processed_data = nullptr; 333 media::AudioBus* processed_data = nullptr;
500 base::TimeDelta processed_data_audio_delay; 334 base::TimeDelta processed_data_audio_delay;
501 int new_volume = 0; 335 int new_volume = 0;
502 while (audio_processor_->ProcessAndConsumeData( 336 while (audio_processor_->ProcessAndConsumeData(
503 current_volume, key_pressed, 337 current_volume, key_pressed,
504 &processed_data, &processed_data_audio_delay, &new_volume)) { 338 &processed_data, &processed_data_audio_delay, &new_volume)) {
505 DCHECK(processed_data); 339 DCHECK(processed_data);
506 340
507 level_calculator_.Calculate(*processed_data, force_report_nonzero_energy); 341 level_calculator_.Calculate(*processed_data, force_report_nonzero_energy);
508 342
509 const base::TimeTicks processed_data_capture_time = 343 MediaStreamAudioSource::DeliverDataToTracks(
510 reference_clock_snapshot - processed_data_audio_delay; 344 *processed_data, reference_clock_snapshot - processed_data_audio_delay);
511 for (const auto& track : tracks)
512 track->Capture(*processed_data, processed_data_capture_time);
513 345
514 if (new_volume) { 346 if (new_volume) {
515 SetVolume(new_volume); 347 SetVolume(new_volume);
516 348
517 // Update the |current_volume| to avoid passing the old volume to AGC. 349 // Update the |current_volume| to avoid passing the old volume to AGC.
518 current_volume = new_volume; 350 current_volume = new_volume;
519 } 351 }
520 } 352 }
521 } 353 }
522 354
523 void WebRtcAudioCapturer::OnCaptureError(const std::string& message) { 355 void ProcessedLocalAudioSource::OnCaptureError(const std::string& message) {
524 WebRtcLogMessage("WAC::OnCaptureError: " + message); 356 WebRtcLogMessage("ProcessedLocalAudioSource::OnCaptureError: " + message);
525 } 357 }
526 358
527 media::AudioParameters WebRtcAudioCapturer::GetInputFormat() const { 359 media::AudioParameters ProcessedLocalAudioSource::GetInputFormat() const {
528 base::AutoLock auto_lock(lock_); 360 return audio_processor_ ? audio_processor_->InputFormat()
529 return audio_processor_->InputFormat(); 361 : media::AudioParameters();
530 } 362 }
531 363
532 int WebRtcAudioCapturer::GetBufferSize(int sample_rate) const { 364 int ProcessedLocalAudioSource::GetBufferSize(int sample_rate) const {
533 DCHECK(thread_checker_.CalledOnValidThread()); 365 DCHECK(thread_checker_.CalledOnValidThread());
534 #if defined(OS_ANDROID) 366 #if defined(OS_ANDROID)
535 // TODO(henrika): Tune and adjust buffer size on Android. 367 // TODO(henrika): Re-evaluate whether to use same logic as other platforms.
536 return (2 * sample_rate / 100); 368 return (2 * sample_rate / 100);
537 #endif 369 #endif
538 370
539 // PeerConnection is running at a buffer size of 10ms data. A multiple of 371 // If audio processing is turned on, require 10ms buffers.
540 // 10ms as the buffer size can give the best performance to PeerConnection. 372 if (audio_processor_->has_audio_processing())
541 int peer_connection_buffer_size = sample_rate / 100; 373 return (sample_rate / 100);
542 374
543 // Use the native hardware buffer size in non peer connection mode when the 375 // If audio processing is off and the native hardware buffer size was
544 // platform is using a native buffer size smaller than the PeerConnection 376 // provided, use it. It can be harmful, in terms of CPU/power consumption, to
545 // buffer size and audio processing is off. 377 // use smaller buffer sizes than the native size (http://crbug.com/362261).
546 int hardware_buffer_size = device_info_.device.input.frames_per_buffer; 378 if (int hardware_buffer_size = device_info().device.input.frames_per_buffer)
547 if (!peer_connection_mode_ && hardware_buffer_size &&
548 hardware_buffer_size <= peer_connection_buffer_size &&
549 !audio_processor_->has_audio_processing()) {
550 DVLOG(1) << "WebRtcAudioCapturer is using hardware buffer size "
551 << hardware_buffer_size;
552 return hardware_buffer_size; 379 return hardware_buffer_size;
553 }
554 380
381 // If the buffer size is missing from the StreamDeviceInfo, provide 10ms as a
382 // fall-back.
383 //
384 // TODO(miu): Identify where/why the buffer size might be missing, fix the
385 // code, and then require it here.
555 return (sample_rate / 100); 386 return (sample_rate / 100);
556 } 387 }
557 388
558 void WebRtcAudioCapturer::SetCapturerSource(
559 const scoped_refptr<media::AudioCapturerSource>& source,
560 media::AudioParameters params) {
561 // Create a new audio stream as source which uses the new source.
562 SetCapturerSourceInternal(source, params.channel_layout(),
563 params.sample_rate());
564 }
565
566 } // namespace content 389 } // namespace content
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698