OLD | NEW |
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "content/renderer/media/webrtc_audio_capturer.h" | 5 #include "content/renderer/media/webrtc/processed_local_audio_source.h" |
6 | 6 |
7 #include "base/bind.h" | |
8 #include "base/logging.h" | 7 #include "base/logging.h" |
9 #include "base/macros.h" | |
10 #include "base/metrics/histogram.h" | 8 #include "base/metrics/histogram.h" |
11 #include "base/strings/string_util.h" | |
12 #include "base/strings/stringprintf.h" | 9 #include "base/strings/stringprintf.h" |
13 #include "build/build_config.h" | |
14 #include "content/child/child_process.h" | |
15 #include "content/renderer/media/audio_device_factory.h" | 10 #include "content/renderer/media/audio_device_factory.h" |
16 #include "content/renderer/media/media_stream_audio_processor.h" | |
17 #include "content/renderer/media/media_stream_audio_processor_options.h" | 11 #include "content/renderer/media/media_stream_audio_processor_options.h" |
18 #include "content/renderer/media/media_stream_audio_source.h" | |
19 #include "content/renderer/media/media_stream_constraints_util.h" | 12 #include "content/renderer/media/media_stream_constraints_util.h" |
| 13 #include "content/renderer/media/webrtc/peer_connection_dependency_factory.h" |
20 #include "content/renderer/media/webrtc_audio_device_impl.h" | 14 #include "content/renderer/media/webrtc_audio_device_impl.h" |
21 #include "content/renderer/media/webrtc_local_audio_track.h" | |
22 #include "content/renderer/media/webrtc_logging.h" | 15 #include "content/renderer/media/webrtc_logging.h" |
| 16 #include "content/renderer/render_frame_impl.h" |
| 17 #include "content/renderer/render_thread_impl.h" |
| 18 #include "media/audio/audio_input_device.h" |
23 #include "media/audio/sample_rates.h" | 19 #include "media/audio/sample_rates.h" |
| 20 #include "media/base/audio_hardware_config.h" |
| 21 #include "media/base/channel_layout.h" |
| 22 #include "third_party/webrtc/api/mediaconstraintsinterface.h" |
| 23 #include "third_party/webrtc/media/base/mediachannel.h" |
24 | 24 |
25 namespace content { | 25 namespace content { |
26 | 26 |
27 // Reference counted container of WebRtcLocalAudioTrack delegate. | 27 namespace { |
28 // TODO(xians): Switch to MediaStreamAudioSinkOwner. | 28 // Used as an identifier for ProcessedLocalAudioSource::From(). |
29 class WebRtcAudioCapturer::TrackOwner | 29 void* const kClassIdentifier = const_cast<void**>(&kClassIdentifier); |
30 : public base::RefCountedThreadSafe<WebRtcAudioCapturer::TrackOwner> { | 30 } // namespace |
31 public: | |
32 explicit TrackOwner(WebRtcLocalAudioTrack* track) | |
33 : delegate_(track) {} | |
34 | 31 |
35 void Capture(const media::AudioBus& audio_bus, | 32 ProcessedLocalAudioSource::ProcessedLocalAudioSource( |
36 base::TimeTicks estimated_capture_time) { | 33 int consumer_render_frame_id, |
37 base::AutoLock lock(lock_); | 34 const StreamDeviceInfo& device_info, |
38 if (delegate_) { | 35 PeerConnectionDependencyFactory* factory) |
39 delegate_->Capture(audio_bus, estimated_capture_time); | 36 : MediaStreamAudioSource(true /* is_local_source */), |
40 } | 37 consumer_render_frame_id_(consumer_render_frame_id), |
| 38 pc_factory_(factory), |
| 39 volume_(0), |
| 40 allow_invalid_render_frame_id_for_testing_(false) { |
| 41 DCHECK(pc_factory_); |
| 42 DVLOG(1) << "ProcessedLocalAudioSource::ProcessedLocalAudioSource()"; |
| 43 MediaStreamSource::SetDeviceInfo(device_info); |
| 44 } |
| 45 |
| 46 ProcessedLocalAudioSource::~ProcessedLocalAudioSource() { |
| 47 DVLOG(1) << "ProcessedLocalAudioSource::~ProcessedLocalAudioSource()"; |
| 48 EnsureSourceIsStopped(); |
| 49 } |
| 50 |
| 51 // static |
| 52 ProcessedLocalAudioSource* ProcessedLocalAudioSource::From( |
| 53 MediaStreamAudioSource* source) { |
| 54 if (source && source->GetClassIdentifier() == kClassIdentifier) |
| 55 return static_cast<ProcessedLocalAudioSource*>(source); |
| 56 return nullptr; |
| 57 } |
| 58 |
| 59 void ProcessedLocalAudioSource::SetSourceConstraints( |
| 60 const blink::WebMediaConstraints& constraints) { |
| 61 DCHECK(!constraints.isNull()); |
| 62 constraints_ = constraints; |
| 63 } |
| 64 |
| 65 void* ProcessedLocalAudioSource::GetClassIdentifier() const { |
| 66 return kClassIdentifier; |
| 67 } |
| 68 |
| 69 bool ProcessedLocalAudioSource::EnsureSourceIsStarted() { |
| 70 DCHECK(thread_checker_.CalledOnValidThread()); |
| 71 |
| 72 if (source_) |
| 73 return true; |
| 74 |
| 75 // Sanity-check that the consuming RenderFrame still exists. This is |
| 76 // required to initialize the audio source. |
| 77 if (!allow_invalid_render_frame_id_for_testing_ && |
| 78 !RenderFrameImpl::FromRoutingID(consumer_render_frame_id_)) { |
| 79 WebRtcLogMessage("ProcessedLocalAudioSource::EnsureSourceIsStarted() fails " |
 | 80 "because the render frame does not exist."); |
| 81 return false; |
41 } | 82 } |
42 | 83 |
43 void OnSetFormat(const media::AudioParameters& params) { | |
44 base::AutoLock lock(lock_); | |
45 if (delegate_) | |
46 delegate_->OnSetFormat(params); | |
47 } | |
48 | |
49 void Reset() { | |
50 base::AutoLock lock(lock_); | |
51 delegate_ = NULL; | |
52 } | |
53 | |
54 void Stop() { | |
55 base::AutoLock lock(lock_); | |
56 DCHECK(delegate_); | |
57 | |
58 // This can be reentrant so reset |delegate_| before calling out. | |
59 WebRtcLocalAudioTrack* temp = delegate_; | |
60 delegate_ = NULL; | |
61 temp->Stop(); | |
62 } | |
63 | |
64 // Wrapper which allows to use std::find_if() when adding and removing | |
65 // sinks to/from the list. | |
66 struct TrackWrapper { | |
67 explicit TrackWrapper(WebRtcLocalAudioTrack* track) : track_(track) {} | |
68 bool operator()( | |
69 const scoped_refptr<WebRtcAudioCapturer::TrackOwner>& owner) const { | |
70 return owner->IsEqual(track_); | |
71 } | |
72 WebRtcLocalAudioTrack* track_; | |
73 }; | |
74 | |
75 protected: | |
76 virtual ~TrackOwner() {} | |
77 | |
78 private: | |
79 friend class base::RefCountedThreadSafe<WebRtcAudioCapturer::TrackOwner>; | |
80 | |
81 bool IsEqual(const WebRtcLocalAudioTrack* other) const { | |
82 base::AutoLock lock(lock_); | |
83 return (other == delegate_); | |
84 } | |
85 | |
86 // Do NOT reference count the |delegate_| to avoid cyclic reference counting. | |
87 WebRtcLocalAudioTrack* delegate_; | |
88 mutable base::Lock lock_; | |
89 | |
90 DISALLOW_COPY_AND_ASSIGN(TrackOwner); | |
91 }; | |
92 | |
93 // static | |
94 std::unique_ptr<WebRtcAudioCapturer> WebRtcAudioCapturer::CreateCapturer( | |
95 int render_frame_id, | |
96 const StreamDeviceInfo& device_info, | |
97 const blink::WebMediaConstraints& constraints, | |
98 WebRtcAudioDeviceImpl* audio_device, | |
99 MediaStreamAudioSource* audio_source) { | |
100 std::unique_ptr<WebRtcAudioCapturer> capturer(new WebRtcAudioCapturer( | |
101 render_frame_id, device_info, constraints, audio_device, audio_source)); | |
102 if (capturer->Initialize()) | |
103 return capturer; | |
104 | |
105 return NULL; | |
106 } | |
107 | |
108 bool WebRtcAudioCapturer::Initialize() { | |
109 DCHECK(thread_checker_.CalledOnValidThread()); | |
110 DVLOG(1) << "WebRtcAudioCapturer::Initialize()"; | |
111 WebRtcLogMessage(base::StringPrintf( | 84 WebRtcLogMessage(base::StringPrintf( |
112 "WAC::Initialize. render_frame_id=%d" | 85 "ProcessedLocalAudioSource::EnsureSourceIsStarted. render_frame_id=%d" |
113 ", channel_layout=%d, sample_rate=%d, buffer_size=%d" | 86 ", channel_layout=%d, sample_rate=%d, buffer_size=%d" |
114 ", session_id=%d, paired_output_sample_rate=%d" | 87 ", session_id=%d, paired_output_sample_rate=%d" |
115 ", paired_output_frames_per_buffer=%d, effects=%d. ", | 88 ", paired_output_frames_per_buffer=%d, effects=%d. ", |
116 render_frame_id_, device_info_.device.input.channel_layout, | 89 consumer_render_frame_id_, device_info().device.input.channel_layout, |
117 device_info_.device.input.sample_rate, | 90 device_info().device.input.sample_rate, |
118 device_info_.device.input.frames_per_buffer, device_info_.session_id, | 91 device_info().device.input.frames_per_buffer, device_info().session_id, |
119 device_info_.device.matched_output.sample_rate, | 92 device_info().device.matched_output.sample_rate, |
120 device_info_.device.matched_output.frames_per_buffer, | 93 device_info().device.matched_output.frames_per_buffer, |
121 device_info_.device.input.effects)); | 94 device_info().device.input.effects)); |
122 | 95 |
123 if (render_frame_id_ == -1) { | 96 // Sanity-check that the constraints, plus the additional input effects are |
124 // Return true here to allow injecting a new source via | 97 // valid when combined. |
125 // SetCapturerSourceForTesting() at a later state. | 98 const MediaAudioConstraints audio_constraints( |
126 return true; | 99 constraints_, device_info().device.input.effects); |
| 100 if (!audio_constraints.IsValid()) { |
| 101 WebRtcLogMessage("ProcessedLocalAudioSource::EnsureSourceIsStarted() fails " |
 | 102 "because MediaAudioConstraints are not valid."); |
| 103 return false; |
127 } | 104 } |
128 | 105 |
129 MediaAudioConstraints audio_constraints(constraints_, | 106 // Build an AudioOptions by applying relevant constraints to it, and then use |
130 device_info_.device.input.effects); | 107 // it to create a webrtc::AudioSourceInterface instance. |
131 if (!audio_constraints.IsValid()) | 108 cricket::AudioOptions rtc_options; |
| 109 rtc_options.echo_cancellation = ConstraintToOptional( |
| 110 constraints_, &blink::WebMediaTrackConstraintSet::echoCancellation); |
| 111 rtc_options.delay_agnostic_aec = ConstraintToOptional( |
| 112 constraints_, &blink::WebMediaTrackConstraintSet::googDAEchoCancellation); |
| 113 rtc_options.auto_gain_control = ConstraintToOptional( |
| 114 constraints_, &blink::WebMediaTrackConstraintSet::googAutoGainControl); |
| 115 rtc_options.experimental_agc = ConstraintToOptional( |
| 116 constraints_, |
| 117 &blink::WebMediaTrackConstraintSet::googExperimentalAutoGainControl); |
| 118 rtc_options.noise_suppression = ConstraintToOptional( |
| 119 constraints_, &blink::WebMediaTrackConstraintSet::googNoiseSuppression); |
| 120 rtc_options.experimental_ns = ConstraintToOptional( |
| 121 constraints_, |
| 122 &blink::WebMediaTrackConstraintSet::googExperimentalNoiseSuppression); |
| 123 rtc_options.highpass_filter = ConstraintToOptional( |
| 124 constraints_, &blink::WebMediaTrackConstraintSet::googHighpassFilter); |
| 125 rtc_options.typing_detection = ConstraintToOptional( |
| 126 constraints_, |
| 127 &blink::WebMediaTrackConstraintSet::googTypingNoiseDetection); |
| 128 rtc_options.stereo_swapping = ConstraintToOptional( |
| 129 constraints_, &blink::WebMediaTrackConstraintSet::googAudioMirroring); |
| 130 MediaAudioConstraints::ApplyFixedAudioConstraints(&rtc_options); |
| 131 if (device_info().device.input.effects & |
| 132 media::AudioParameters::ECHO_CANCELLER) { |
| 133 // TODO(hta): Figure out if we should be looking at echoCancellation. |
| 134 // Previous code had googEchoCancellation only. |
| 135 const blink::BooleanConstraint& echoCancellation = |
| 136 constraints_.basic().googEchoCancellation; |
| 137 if (echoCancellation.hasExact() && !echoCancellation.exact()) { |
| 138 StreamDeviceInfo modified_device_info(device_info()); |
| 139 modified_device_info.device.input.effects &= |
| 140 ~media::AudioParameters::ECHO_CANCELLER; |
| 141 SetDeviceInfo(modified_device_info); |
| 142 } |
| 143 rtc_options.echo_cancellation = rtc::Optional<bool>(false); |
| 144 } |
| 145 rtc_source_ = pc_factory_->CreateLocalAudioSource(rtc_options); |
| 146 if (rtc_source_->state() != webrtc::MediaSourceInterface::kLive) { |
| 147 WebRtcLogMessage("ProcessedLocalAudioSource::EnsureSourceIsStarted() fails " |
 | 148 "because the rtc LocalAudioSource is not live."); |
132 return false; | 149 return false; |
| 150 } |
133 | 151 |
134 media::ChannelLayout channel_layout = static_cast<media::ChannelLayout>( | 152 // Create the MediaStreamAudioProcessor, bound to the WebRTC audio device |
135 device_info_.device.input.channel_layout); | 153 // module. |
| 154 WebRtcAudioDeviceImpl* const rtc_audio_device = |
| 155 pc_factory_->GetWebRtcAudioDevice(); |
| 156 if (!rtc_audio_device) { |
| 157 WebRtcLogMessage("ProcessedLocalAudioSource::EnsureSourceIsStarted() fails " |
 | 158 "because there is no WebRtcAudioDeviceImpl instance."); |
| 159 return false; |
| 160 } |
| 161 audio_processor_ = new rtc::RefCountedObject<MediaStreamAudioProcessor>( |
| 162 constraints_, device_info().device.input, rtc_audio_device); |
136 | 163 |
137 // If KEYBOARD_MIC effect is set, change the layout to the corresponding | 164 // If KEYBOARD_MIC effect is set, change the layout to the corresponding |
138 // layout that includes the keyboard mic. | 165 // layout that includes the keyboard mic. |
139 if ((device_info_.device.input.effects & | 166 media::ChannelLayout channel_layout = static_cast<media::ChannelLayout>( |
| 167 device_info().device.input.channel_layout); |
| 168 if ((device_info().device.input.effects & |
140 media::AudioParameters::KEYBOARD_MIC) && | 169 media::AudioParameters::KEYBOARD_MIC) && |
141 audio_constraints.GetGoogExperimentalNoiseSuppression()) { | 170 audio_constraints.GetGoogExperimentalNoiseSuppression()) { |
142 if (channel_layout == media::CHANNEL_LAYOUT_STEREO) { | 171 if (channel_layout == media::CHANNEL_LAYOUT_STEREO) { |
143 channel_layout = media::CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC; | 172 channel_layout = media::CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC; |
144 DVLOG(1) << "Changed stereo layout to stereo + keyboard mic layout due " | 173 DVLOG(1) << "Changed stereo layout to stereo + keyboard mic layout due " |
145 << "to KEYBOARD_MIC effect."; | 174 << "to KEYBOARD_MIC effect."; |
146 } else { | 175 } else { |
147 DVLOG(1) << "KEYBOARD_MIC effect ignored, not compatible with layout " | 176 DVLOG(1) << "KEYBOARD_MIC effect ignored, not compatible with layout " |
148 << channel_layout; | 177 << channel_layout; |
149 } | 178 } |
150 } | 179 } |
151 | 180 |
152 DVLOG(1) << "Audio input hardware channel layout: " << channel_layout; | 181 DVLOG(1) << "Audio input hardware channel layout: " << channel_layout; |
153 UMA_HISTOGRAM_ENUMERATION("WebRTC.AudioInputChannelLayout", | 182 UMA_HISTOGRAM_ENUMERATION("WebRTC.AudioInputChannelLayout", |
154 channel_layout, media::CHANNEL_LAYOUT_MAX + 1); | 183 channel_layout, media::CHANNEL_LAYOUT_MAX + 1); |
155 | 184 |
156 // Verify that the reported input channel configuration is supported. | 185 // Verify that the reported input channel configuration is supported. |
157 if (channel_layout != media::CHANNEL_LAYOUT_MONO && | 186 if (channel_layout != media::CHANNEL_LAYOUT_MONO && |
158 channel_layout != media::CHANNEL_LAYOUT_STEREO && | 187 channel_layout != media::CHANNEL_LAYOUT_STEREO && |
159 channel_layout != media::CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC) { | 188 channel_layout != media::CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC) { |
160 DLOG(ERROR) << channel_layout | 189 WebRtcLogMessage(base::StringPrintf( |
161 << " is not a supported input channel configuration."; | 190 "ProcessedLocalAudioSource::EnsureSourceIsStarted() fails " |
 | 191 "because the input channel layout (%d) is not supported.", |
| 192 static_cast<int>(channel_layout))); |
162 return false; | 193 return false; |
163 } | 194 } |
164 | 195 |
165 DVLOG(1) << "Audio input hardware sample rate: " | 196 DVLOG(1) << "Audio input hardware sample rate: " |
166 << device_info_.device.input.sample_rate; | 197 << device_info().device.input.sample_rate; |
167 media::AudioSampleRate asr; | 198 media::AudioSampleRate asr; |
168 if (media::ToAudioSampleRate(device_info_.device.input.sample_rate, &asr)) { | 199 if (media::ToAudioSampleRate(device_info().device.input.sample_rate, &asr)) { |
169 UMA_HISTOGRAM_ENUMERATION( | 200 UMA_HISTOGRAM_ENUMERATION( |
170 "WebRTC.AudioInputSampleRate", asr, media::kAudioSampleRateMax + 1); | 201 "WebRTC.AudioInputSampleRate", asr, media::kAudioSampleRateMax + 1); |
171 } else { | 202 } else { |
172 UMA_HISTOGRAM_COUNTS("WebRTC.AudioInputSampleRateUnexpected", | 203 UMA_HISTOGRAM_COUNTS("WebRTC.AudioInputSampleRateUnexpected", |
173 device_info_.device.input.sample_rate); | 204 device_info().device.input.sample_rate); |
174 } | 205 } |
175 | 206 |
176 // Create and configure the default audio capturing source. | 207 // Determine the audio format required of the AudioCapturerSource. Then, pass |
177 SetCapturerSourceInternal( | 208 // that to the |audio_processor_| and set the output format of this |
178 AudioDeviceFactory::NewAudioCapturerSource(render_frame_id_), | 209 // ProcessedLocalAudioSource to the processor's output format. |
179 channel_layout, device_info_.device.input.sample_rate); | 210 media::AudioParameters params( |
| 211 media::AudioParameters::AUDIO_PCM_LOW_LATENCY, channel_layout, |
| 212 device_info().device.input.sample_rate, 16, |
| 213 GetBufferSize(device_info().device.input.sample_rate)); |
| 214 params.set_effects(device_info().device.input.effects); |
| 215 DCHECK(params.IsValid()); |
| 216 audio_processor_->OnCaptureFormatChanged(params); |
| 217 MediaStreamAudioSource::SetFormat(audio_processor_->OutputFormat()); |
180 | 218 |
181 // Add the capturer to the WebRtcAudioDeviceImpl since it needs some hardware | 219 // Start the source. |
182 // information from the capturer. | 220 VLOG(1) << "Starting WebRTC audio source for consumption by render frame " |
183 if (audio_device_) | 221 << consumer_render_frame_id_ << " with input parameters={" |
184 audio_device_->AddAudioCapturer(this); | 222 << params.AsHumanReadableString() << "} and output parameters={" |
| 223 << GetAudioParameters().AsHumanReadableString() << '}'; |
| 224 source_ = |
| 225 AudioDeviceFactory::NewAudioCapturerSource(consumer_render_frame_id_); |
| 226 source_->Initialize(params, this, device_info().session_id); |
| 227 // We need to set the AGC control before starting the stream. |
| 228 source_->SetAutomaticGainControl(true); |
| 229 source_->Start(); |
| 230 |
| 231 // Register this source with the WebRtcAudioDeviceImpl. |
| 232 rtc_audio_device->AddAudioCapturer(this); |
185 | 233 |
186 return true; | 234 return true; |
187 } | 235 } |
188 | 236 |
189 WebRtcAudioCapturer::WebRtcAudioCapturer( | 237 void ProcessedLocalAudioSource::EnsureSourceIsStopped() { |
190 int render_frame_id, | 238 DCHECK(thread_checker_.CalledOnValidThread()); |
191 const StreamDeviceInfo& device_info, | |
192 const blink::WebMediaConstraints& constraints, | |
193 WebRtcAudioDeviceImpl* audio_device, | |
194 MediaStreamAudioSource* audio_source) | |
195 : constraints_(constraints), | |
196 audio_processor_(new rtc::RefCountedObject<MediaStreamAudioProcessor>( | |
197 constraints, | |
198 device_info.device.input, | |
199 audio_device)), | |
200 running_(false), | |
201 render_frame_id_(render_frame_id), | |
202 device_info_(device_info), | |
203 volume_(0), | |
204 peer_connection_mode_(false), | |
205 audio_device_(audio_device), | |
206 audio_source_(audio_source) { | |
207 DVLOG(1) << "WebRtcAudioCapturer::WebRtcAudioCapturer()"; | |
208 } | |
209 | 239 |
210 WebRtcAudioCapturer::~WebRtcAudioCapturer() { | 240 if (!source_) |
211 DCHECK(thread_checker_.CalledOnValidThread()); | 241 return; |
212 DCHECK(tracks_.IsEmpty()); | |
213 DVLOG(1) << "WebRtcAudioCapturer::~WebRtcAudioCapturer()"; | |
214 Stop(); | |
215 } | |
216 | 242 |
217 void WebRtcAudioCapturer::AddTrack(WebRtcLocalAudioTrack* track) { | 243 if (WebRtcAudioDeviceImpl* rtc_audio_device = |
218 DCHECK(thread_checker_.CalledOnValidThread()); | 244 pc_factory_->GetWebRtcAudioDevice()) { |
219 DCHECK(track); | 245 rtc_audio_device->RemoveAudioCapturer(this); |
220 DVLOG(1) << "WebRtcAudioCapturer::AddTrack()"; | |
221 | |
222 track->SetLevel(level_calculator_.level()); | |
223 | |
224 // The track only grabs stats from the audio processor. Stats are only | |
225 // available if audio processing is turned on. Therefore, only provide the | |
226 // track a reference if audio processing is turned on. | |
227 if (audio_processor_->has_audio_processing()) | |
228 track->SetAudioProcessor(audio_processor_); | |
229 | |
230 { | |
231 base::AutoLock auto_lock(lock_); | |
232 // Verify that |track| is not already added to the list. | |
233 DCHECK(!tracks_.Contains(TrackOwner::TrackWrapper(track))); | |
234 | |
235 // Add with a tag, so we remember to call OnSetFormat() on the new | |
236 // track. | |
237 scoped_refptr<TrackOwner> track_owner(new TrackOwner(track)); | |
238 tracks_.AddAndTag(track_owner.get()); | |
239 } | |
240 } | |
241 | |
242 void WebRtcAudioCapturer::RemoveTrack(WebRtcLocalAudioTrack* track) { | |
243 DCHECK(thread_checker_.CalledOnValidThread()); | |
244 DVLOG(1) << "WebRtcAudioCapturer::RemoveTrack()"; | |
245 bool stop_source = false; | |
246 { | |
247 base::AutoLock auto_lock(lock_); | |
248 | |
249 scoped_refptr<TrackOwner> removed_item = | |
250 tracks_.Remove(TrackOwner::TrackWrapper(track)); | |
251 | |
252 // Clear the delegate to ensure that no more capture callbacks will | |
253 // be sent to this sink. Also avoids a possible crash which can happen | |
254 // if this method is called while capturing is active. | |
255 if (removed_item.get()) { | |
256 removed_item->Reset(); | |
257 stop_source = tracks_.IsEmpty(); | |
258 } | |
259 } | |
260 if (stop_source) { | |
261 // Since WebRtcAudioCapturer does not inherit MediaStreamAudioSource, | |
262 // and instead MediaStreamAudioSource is composed of a WebRtcAudioCapturer, | |
263 // we have to call StopSource on the MediaStreamSource. This will call | |
264 // MediaStreamAudioSource::DoStopSource which in turn call | |
265 // WebRtcAudioCapturerer::Stop(); | |
266 audio_source_->StopSource(); | |
267 } | |
268 } | |
269 | |
270 void WebRtcAudioCapturer::SetCapturerSourceInternal( | |
271 const scoped_refptr<media::AudioCapturerSource>& source, | |
272 media::ChannelLayout channel_layout, | |
273 int sample_rate) { | |
274 DCHECK(thread_checker_.CalledOnValidThread()); | |
275 DVLOG(1) << "SetCapturerSource(channel_layout=" << channel_layout << "," | |
276 << "sample_rate=" << sample_rate << ")"; | |
277 scoped_refptr<media::AudioCapturerSource> old_source; | |
278 { | |
279 base::AutoLock auto_lock(lock_); | |
280 if (source_.get() == source.get()) | |
281 return; | |
282 | |
283 source_.swap(old_source); | |
284 source_ = source; | |
285 | |
286 // Reset the flag to allow starting the new source. | |
287 running_ = false; | |
288 } | 246 } |
289 | 247 |
290 DVLOG(1) << "Switching to a new capture source."; | 248 // Note: The source is stopped while holding |volume_lock_| because the |
291 if (old_source.get()) | 249 // SetVolume() method needs to know whether |source_| is valid. |
292 old_source->Stop(); | |
293 | |
294 // Dispatch the new parameters both to the sink(s) and to the new source, | |
295 // also apply the new |constraints|. | |
296 // The idea is to get rid of any dependency of the microphone parameters | |
297 // which would normally be used by default. | |
298 // bits_per_sample is always 16 for now. | |
299 media::AudioParameters params(media::AudioParameters::AUDIO_PCM_LOW_LATENCY, | |
300 channel_layout, sample_rate, 16, | |
301 GetBufferSize(sample_rate)); | |
302 params.set_effects(device_info_.device.input.effects); | |
303 DCHECK(params.IsValid()); | |
304 | |
305 { | 250 { |
306 base::AutoLock auto_lock(lock_); | 251 base::AutoLock auto_lock(volume_lock_); |
307 | 252 source_->Stop(); |
308 // Notify the |audio_processor_| of the new format. We're doing this while | 253 source_ = nullptr; |
309 // the lock is held only because the signaling thread might be calling | |
310 // GetInputFormat(). Simultaneous reads from the audio thread are NOT the | |
311 // concern here since the source is currently stopped (i.e., no audio | |
312 // capture calls can be executing). | |
313 audio_processor_->OnCaptureFormatChanged(params); | |
314 | |
315 // Notify all tracks about the new format. | |
316 tracks_.TagAll(); | |
317 } | 254 } |
318 | 255 |
319 if (source.get()) | |
320 source->Initialize(params, this, device_info_.session_id); | |
321 | |
322 Start(); | |
323 } | |
324 | |
325 void WebRtcAudioCapturer::EnablePeerConnectionMode() { | |
326 DCHECK(thread_checker_.CalledOnValidThread()); | |
327 DVLOG(1) << "EnablePeerConnectionMode"; | |
328 // Do nothing if the peer connection mode has been enabled. | |
329 if (peer_connection_mode_) | |
330 return; | |
331 | |
332 peer_connection_mode_ = true; | |
333 int render_frame_id = -1; | |
334 media::AudioParameters input_params; | |
335 { | |
336 base::AutoLock auto_lock(lock_); | |
337 // Simply return if there is no existing source or the |render_frame_id_| is | |
338 // not valid. | |
339 if (!source_.get() || render_frame_id_ == -1) | |
340 return; | |
341 | |
342 render_frame_id = render_frame_id_; | |
343 input_params = audio_processor_->InputFormat(); | |
344 } | |
345 | |
346 // Do nothing if the current buffer size is the WebRtc native buffer size. | |
347 if (GetBufferSize(input_params.sample_rate()) == | |
348 input_params.frames_per_buffer()) { | |
349 return; | |
350 } | |
351 | |
352 // Create a new audio stream as source which will open the hardware using | |
353 // WebRtc native buffer size. | |
354 SetCapturerSourceInternal( | |
355 AudioDeviceFactory::NewAudioCapturerSource(render_frame_id), | |
356 input_params.channel_layout(), input_params.sample_rate()); | |
357 } | |
358 | |
359 void WebRtcAudioCapturer::Start() { | |
360 DCHECK(thread_checker_.CalledOnValidThread()); | |
361 DVLOG(1) << "WebRtcAudioCapturer::Start()"; | |
362 base::AutoLock auto_lock(lock_); | |
363 if (running_ || !source_.get()) | |
364 return; | |
365 | |
366 // Start the data source, i.e., start capturing data from the current source. | |
367 // We need to set the AGC control before starting the stream. | |
368 source_->SetAutomaticGainControl(true); | |
369 source_->Start(); | |
370 running_ = true; | |
371 } | |
372 | |
373 void WebRtcAudioCapturer::Stop() { | |
374 DCHECK(thread_checker_.CalledOnValidThread()); | |
375 DVLOG(1) << "WebRtcAudioCapturer::Stop()"; | |
376 scoped_refptr<media::AudioCapturerSource> source; | |
377 TrackList::ItemList tracks; | |
378 { | |
379 base::AutoLock auto_lock(lock_); | |
380 if (!running_) | |
381 return; | |
382 | |
383 source = source_; | |
384 tracks = tracks_.Items(); | |
385 tracks_.Clear(); | |
386 running_ = false; | |
387 } | |
388 | |
389 // Remove the capturer object from the WebRtcAudioDeviceImpl. | |
390 if (audio_device_) | |
391 audio_device_->RemoveAudioCapturer(this); | |
392 | |
393 for (TrackList::ItemList::const_iterator it = tracks.begin(); | |
394 it != tracks.end(); | |
395 ++it) { | |
396 (*it)->Stop(); | |
397 } | |
398 | |
399 if (source.get()) | |
400 source->Stop(); | |
401 | |
402 // Stop the audio processor to avoid feeding render data into the processor. | 256 // Stop the audio processor to avoid feeding render data into the processor. |
403 audio_processor_->Stop(); | 257 audio_processor_->Stop(); |
| 258 |
| 259 VLOG(1) << "Stopped WebRTC audio pipeline for consumption by render frame " |
| 260 << consumer_render_frame_id_ << '.'; |
404 } | 261 } |
405 | 262 |
406 void WebRtcAudioCapturer::SetVolume(int volume) { | 263 void ProcessedLocalAudioSource::SetVolume(int volume) { |
407 DVLOG(1) << "WebRtcAudioCapturer::SetVolume()"; | 264 DVLOG(1) << "ProcessedLocalAudioSource::SetVolume()"; |
408 DCHECK_LE(volume, MaxVolume()); | 265 DCHECK_LE(volume, MaxVolume()); |
409 double normalized_volume = static_cast<double>(volume) / MaxVolume(); | 266 double normalized_volume = static_cast<double>(volume) / MaxVolume(); |
410 base::AutoLock auto_lock(lock_); | 267 base::AutoLock auto_lock(volume_lock_); |
411 if (source_.get()) | 268 if (source_) |
412 source_->SetVolume(normalized_volume); | 269 source_->SetVolume(normalized_volume); |
413 } | 270 } |
414 | 271 |
415 int WebRtcAudioCapturer::Volume() const { | 272 int ProcessedLocalAudioSource::Volume() const { |
416 base::AutoLock auto_lock(lock_); | 273 base::AutoLock auto_lock(volume_lock_); |
417 return volume_; | 274 return volume_; |
418 } | 275 } |
419 | 276 |
420 int WebRtcAudioCapturer::MaxVolume() const { | 277 int ProcessedLocalAudioSource::MaxVolume() const { |
421 return WebRtcAudioDeviceImpl::kMaxVolumeLevel; | 278 return WebRtcAudioDeviceImpl::kMaxVolumeLevel; |
422 } | 279 } |
423 | 280 |
424 media::AudioParameters WebRtcAudioCapturer::GetOutputFormat() const { | 281 void ProcessedLocalAudioSource::Capture(const media::AudioBus* audio_bus, |
425 DCHECK(thread_checker_.CalledOnValidThread()); | 282 int audio_delay_milliseconds, |
426 return audio_processor_->OutputFormat(); | 283 double volume, |
427 } | 284 bool key_pressed) { |
428 | |
429 void WebRtcAudioCapturer::Capture(const media::AudioBus* audio_source, | |
430 int audio_delay_milliseconds, | |
431 double volume, | |
432 bool key_pressed) { | |
433 // This callback is driven by AudioInputDevice::AudioThreadCallback if | |
434 // |source_| is AudioInputDevice, otherwise it is driven by client's | |
435 // CaptureCallback. | |
436 #if defined(OS_WIN) || defined(OS_MACOSX) | 285 #if defined(OS_WIN) || defined(OS_MACOSX) |
437 DCHECK_LE(volume, 1.0); | 286 DCHECK_LE(volume, 1.0); |
438 #elif (defined(OS_LINUX) && !defined(OS_CHROMEOS)) || defined(OS_OPENBSD) | 287 #elif (defined(OS_LINUX) && !defined(OS_CHROMEOS)) || defined(OS_OPENBSD) |
439 // We have a special situation on Linux where the microphone volume can be | 288 // We have a special situation on Linux where the microphone volume can be |
440 // "higher than maximum". The input volume slider in the sound preference | 289 // "higher than maximum". The input volume slider in the sound preference |
441 // allows the user to set a scaling that is higher than 100%. It means that | 290 // allows the user to set a scaling that is higher than 100%. It means that |
442 // even if the reported maximum levels is N, the actual microphone level can | 291 // even if the reported maximum levels is N, the actual microphone level can |
443 // go up to 1.5x*N and that corresponds to a normalized |volume| of 1.5x. | 292 // go up to 1.5x*N and that corresponds to a normalized |volume| of 1.5x. |
444 DCHECK_LE(volume, 1.6); | 293 DCHECK_LE(volume, 1.6); |
445 #endif | 294 #endif |
446 | 295 |
447 // TODO(miu): Plumbing is needed to determine the actual capture timestamp | 296 // TODO(miu): Plumbing is needed to determine the actual capture timestamp |
448 // of the audio, instead of just snapshotting TimeTicks::Now(), for proper | 297 // of the audio, instead of just snapshotting TimeTicks::Now(), for proper |
449 // audio/video sync. http://crbug.com/335335 | 298 // audio/video sync. http://crbug.com/335335 |
450 const base::TimeTicks reference_clock_snapshot = base::TimeTicks::Now(); | 299 const base::TimeTicks reference_clock_snapshot = base::TimeTicks::Now(); |
451 | 300 |
452 TrackList::ItemList tracks; | 301 // Map internal volume range of [0.0, 1.0] into [0, 255] used by AGC. |
453 TrackList::ItemList tracks_to_notify_format; | 302 // The volume can be higher than 255 on Linux, and it will be cropped to |
454 int current_volume = 0; | 303 // 255 since AGC does not allow values out of range. |
| 304 int current_volume = static_cast<int>((volume * MaxVolume()) + 0.5); |
455 { | 305 { |
456 base::AutoLock auto_lock(lock_); | 306 base::AutoLock auto_lock(volume_lock_); |
457 if (!running_) | 307 volume_ = current_volume; |
458 return; | |
459 | |
460 // Map internal volume range of [0.0, 1.0] into [0, 255] used by AGC. | |
461 // The volume can be higher than 255 on Linux, and it will be cropped to | |
462 // 255 since AGC does not allow values out of range. | |
463 volume_ = static_cast<int>((volume * MaxVolume()) + 0.5); | |
464 current_volume = volume_ > MaxVolume() ? MaxVolume() : volume_; | |
465 tracks = tracks_.Items(); | |
466 tracks_.RetrieveAndClearTags(&tracks_to_notify_format); | |
467 } | 308 } |
| 309 current_volume = volume_ > MaxVolume() ? MaxVolume() : volume_; |
468 | 310 |
469 // Sanity-check the input audio format in debug builds. Then, notify the | 311 // Sanity-check the input audio format in debug builds. Then, notify the |
470 // tracks if the format has changed. | 312 // tracks if the format has changed. |
471 // | 313 // |
472 // Locking is not needed here to read the audio input/output parameters | 314 // Locking is not needed here to read the audio input/output parameters |
473 // because the audio processor format changes only occur while audio capture | 315 // because the audio processor format changes only occur while audio capture |
474 // is stopped. | 316 // is stopped. |
475 DCHECK(audio_processor_->InputFormat().IsValid()); | 317 DCHECK(audio_processor_->InputFormat().IsValid()); |
476 DCHECK_EQ(audio_source->channels(), | 318 DCHECK_EQ(audio_bus->channels(), audio_processor_->InputFormat().channels()); |
477 audio_processor_->InputFormat().channels()); | 319 DCHECK_EQ(audio_bus->frames(), |
478 DCHECK_EQ(audio_source->frames(), | |
479 audio_processor_->InputFormat().frames_per_buffer()); | 320 audio_processor_->InputFormat().frames_per_buffer()); |
480 if (!tracks_to_notify_format.empty()) { | |
481 const media::AudioParameters& output_params = | |
482 audio_processor_->OutputFormat(); | |
483 for (const auto& track : tracks_to_notify_format) | |
484 track->OnSetFormat(output_params); | |
485 } | |
486 | 321 |
487 // Figure out if the pre-processed data has any energy or not. This | 322 // Figure out if the pre-processed data has any energy or not. This |
488 // information will be passed to the level calculator to force it to report | 323 // information will be passed to the level calculator to force it to report |
489 // energy in case the post-processed data is zeroed by the audio processing. | 324 // energy in case the post-processed data is zeroed by the audio processing. |
490 const bool force_report_nonzero_energy = !audio_source->AreFramesZero(); | 325 const bool force_report_nonzero_energy = !audio_bus->AreFramesZero(); |
491 | 326 |
492 // Push the data to the processor for processing. | 327 // Push the data to the processor for processing. |
493 audio_processor_->PushCaptureData( | 328 audio_processor_->PushCaptureData( |
494 *audio_source, | 329 *audio_bus, |
495 base::TimeDelta::FromMilliseconds(audio_delay_milliseconds)); | 330 base::TimeDelta::FromMilliseconds(audio_delay_milliseconds)); |
496 | 331 |
497 // Process and consume the data in the processor until there is not enough | 332 // Process and consume the data in the processor until there is not enough |
498 // data in the processor. | 333 // data in the processor. |
499 media::AudioBus* processed_data = nullptr; | 334 media::AudioBus* processed_data = nullptr; |
500 base::TimeDelta processed_data_audio_delay; | 335 base::TimeDelta processed_data_audio_delay; |
501 int new_volume = 0; | 336 int new_volume = 0; |
502 while (audio_processor_->ProcessAndConsumeData( | 337 while (audio_processor_->ProcessAndConsumeData( |
503 current_volume, key_pressed, | 338 current_volume, key_pressed, |
504 &processed_data, &processed_data_audio_delay, &new_volume)) { | 339 &processed_data, &processed_data_audio_delay, &new_volume)) { |
505 DCHECK(processed_data); | 340 DCHECK(processed_data); |
506 | 341 |
507 level_calculator_.Calculate(*processed_data, force_report_nonzero_energy); | 342 level_calculator_.Calculate(*processed_data, force_report_nonzero_energy); |
508 | 343 |
509 const base::TimeTicks processed_data_capture_time = | 344 MediaStreamAudioSource::DeliverDataToTracks( |
510 reference_clock_snapshot - processed_data_audio_delay; | 345 *processed_data, reference_clock_snapshot - processed_data_audio_delay); |
511 for (const auto& track : tracks) | |
512 track->Capture(*processed_data, processed_data_capture_time); | |
513 | 346 |
514 if (new_volume) { | 347 if (new_volume) { |
515 SetVolume(new_volume); | 348 SetVolume(new_volume); |
516 | 349 |
517 // Update the |current_volume| to avoid passing the old volume to AGC. | 350 // Update the |current_volume| to avoid passing the old volume to AGC. |
518 current_volume = new_volume; | 351 current_volume = new_volume; |
519 } | 352 } |
520 } | 353 } |
521 } | 354 } |
522 | 355 |
523 void WebRtcAudioCapturer::OnCaptureError(const std::string& message) { | 356 void ProcessedLocalAudioSource::OnCaptureError(const std::string& message) { |
524 WebRtcLogMessage("WAC::OnCaptureError: " + message); | 357 WebRtcLogMessage("ProcessedLocalAudioSource::OnCaptureError: " + message); |
525 } | 358 } |
526 | 359 |
527 media::AudioParameters WebRtcAudioCapturer::GetInputFormat() const { | 360 media::AudioParameters ProcessedLocalAudioSource::GetInputFormat() const { |
528 base::AutoLock auto_lock(lock_); | 361 return audio_processor_ ? audio_processor_->InputFormat() |
529 return audio_processor_->InputFormat(); | 362 : media::AudioParameters(); |
530 } | 363 } |
531 | 364 |
532 int WebRtcAudioCapturer::GetBufferSize(int sample_rate) const { | 365 int ProcessedLocalAudioSource::GetBufferSize(int sample_rate) const { |
533 DCHECK(thread_checker_.CalledOnValidThread()); | 366 DCHECK(thread_checker_.CalledOnValidThread()); |
534 #if defined(OS_ANDROID) | 367 #if defined(OS_ANDROID) |
535 // TODO(henrika): Tune and adjust buffer size on Android. | 368 // TODO(henrika): Re-evaluate whether to use same logic as other platforms. |
536 return (2 * sample_rate / 100); | 369 return (2 * sample_rate / 100); |
537 #endif | 370 #endif |
538 | 371 |
539 // PeerConnection is running at a buffer size of 10ms data. A multiple of | 372 // If audio processing is turned on, require 10ms buffers. |
540 // 10ms as the buffer size can give the best performance to PeerConnection. | 373 if (audio_processor_->has_audio_processing()) |
541 int peer_connection_buffer_size = sample_rate / 100; | 374 return (sample_rate / 100); |
542 | 375 |
543 // Use the native hardware buffer size in non peer connection mode when the | 376 // If audio processing is off and the native hardware buffer size was |
544 // platform is using a native buffer size smaller than the PeerConnection | 377 // provided, use it. It can be harmful, in terms of CPU/power consumption, to |
545 // buffer size and audio processing is off. | 378 // use smaller buffer sizes than the native size (http://crbug.com/362261). |
546 int hardware_buffer_size = device_info_.device.input.frames_per_buffer; | 379 if (int hardware_buffer_size = device_info().device.input.frames_per_buffer) |
547 if (!peer_connection_mode_ && hardware_buffer_size && | |
548 hardware_buffer_size <= peer_connection_buffer_size && | |
549 !audio_processor_->has_audio_processing()) { | |
550 DVLOG(1) << "WebRtcAudioCapturer is using hardware buffer size " | |
551 << hardware_buffer_size; | |
552 return hardware_buffer_size; | 380 return hardware_buffer_size; |
553 } | |
554 | 381 |
555 return (sample_rate / 100); | 382 // If the buffer size is missing from the StreamDeviceInfo, provide the |
556 } | 383 // platform's default. |
557 | 384 // |
558 void WebRtcAudioCapturer::SetCapturerSource( | 385 // TODO(miu): Identify where/why the buffer size might be missing, fix the |
559 const scoped_refptr<media::AudioCapturerSource>& source, | 386 // code, and then require it here. |
560 media::AudioParameters params) { | 387 return RenderThreadImpl::current()->GetAudioHardwareConfig() |
561 // Create a new audio stream as source which uses the new source. | 388 ->GetOutputBufferSize(); |
562 SetCapturerSourceInternal(source, params.channel_layout(), | |
563 params.sample_rate()); | |
564 } | 389 } |
565 | 390 |
566 } // namespace content | 391 } // namespace content |
OLD | NEW |