// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/renderer/media/webrtc_audio_capturer.h"

#include "base/bind.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/metrics/histogram.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "build/build_config.h"
#include "content/child/child_process.h"
#include "content/renderer/media/audio_device_factory.h"
#include "content/renderer/media/media_stream_audio_processor.h"
#include "content/renderer/media/media_stream_audio_processor_options.h"
#include "content/renderer/media/media_stream_audio_source.h"
#include "content/renderer/media/media_stream_constraints_util.h"
#include "content/renderer/media/webrtc_audio_device_impl.h"
#include "content/renderer/media/webrtc_local_audio_track.h"
#include "content/renderer/media/webrtc_logging.h"
#include "media/audio/sample_rates.h"

namespace content {

// Reference counted container of WebRtcLocalAudioTrack delegate.
// TODO(xians): Switch to MediaStreamAudioSinkOwner.
class WebRtcAudioCapturer::TrackOwner
    : public base::RefCountedThreadSafe<WebRtcAudioCapturer::TrackOwner> {
 public:
  explicit TrackOwner(WebRtcLocalAudioTrack* track)
      : delegate_(track) {}

  void Capture(const media::AudioBus& audio_bus,
               base::TimeTicks estimated_capture_time) {
    base::AutoLock lock(lock_);
    if (delegate_) {
      delegate_->Capture(audio_bus, estimated_capture_time);
    }
  }

  void OnSetFormat(const media::AudioParameters& params) {
    base::AutoLock lock(lock_);
    if (delegate_)
      delegate_->OnSetFormat(params);
  }

  void Reset() {
    base::AutoLock lock(lock_);
    delegate_ = NULL;
  }

  void Stop() {
    base::AutoLock lock(lock_);
    DCHECK(delegate_);

    // This can be reentrant so reset |delegate_| before calling out.
    WebRtcLocalAudioTrack* temp = delegate_;
    delegate_ = NULL;
    temp->Stop();
  }

  // Wrapper which allows us to use std::find_if() when adding and removing
  // sinks to/from the list.
  struct TrackWrapper {
    explicit TrackWrapper(WebRtcLocalAudioTrack* track) : track_(track) {}
    bool operator()(
        const scoped_refptr<WebRtcAudioCapturer::TrackOwner>& owner) const {
      return owner->IsEqual(track_);
    }
    WebRtcLocalAudioTrack* track_;
  };

 protected:
  virtual ~TrackOwner() {}

 private:
  friend class base::RefCountedThreadSafe<WebRtcAudioCapturer::TrackOwner>;

  bool IsEqual(const WebRtcLocalAudioTrack* other) const {
    base::AutoLock lock(lock_);
    return (other == delegate_);
  }

  // Do NOT reference count the |delegate_| to avoid cyclic reference counting.
  WebRtcLocalAudioTrack* delegate_;
  mutable base::Lock lock_;

  DISALLOW_COPY_AND_ASSIGN(TrackOwner);
};

// static
std::unique_ptr<WebRtcAudioCapturer> WebRtcAudioCapturer::CreateCapturer(
    int render_frame_id,
    const StreamDeviceInfo& device_info,
    const blink::WebMediaConstraints& constraints,
    WebRtcAudioDeviceImpl* audio_device,
    MediaStreamAudioSource* audio_source) {
  std::unique_ptr<WebRtcAudioCapturer> capturer(new WebRtcAudioCapturer(
      render_frame_id, device_info, constraints, audio_device, audio_source));
  if (capturer->Initialize())
    return capturer;

  return NULL;
}

bool WebRtcAudioCapturer::Initialize() {
  DCHECK(thread_checker_.CalledOnValidThread());
  DVLOG(1) << "WebRtcAudioCapturer::Initialize()";
  WebRtcLogMessage(base::StringPrintf(
      "WAC::Initialize. render_frame_id=%d"
      ", channel_layout=%d, sample_rate=%d, buffer_size=%d"
      ", session_id=%d, paired_output_sample_rate=%d"
      ", paired_output_frames_per_buffer=%d, effects=%d. ",
      render_frame_id_, device_info_.device.input.channel_layout,
      device_info_.device.input.sample_rate,
      device_info_.device.input.frames_per_buffer, device_info_.session_id,
      device_info_.device.matched_output.sample_rate,
      device_info_.device.matched_output.frames_per_buffer,
      device_info_.device.input.effects));

  if (render_frame_id_ == -1) {
    // Return true here to allow injecting a new source via
    // SetCapturerSourceForTesting() at a later stage.
    return true;
  }

  MediaAudioConstraints audio_constraints(constraints_,
                                          device_info_.device.input.effects);
  if (!audio_constraints.IsValid())
    return false;

  media::ChannelLayout channel_layout = static_cast<media::ChannelLayout>(
      device_info_.device.input.channel_layout);

  // If KEYBOARD_MIC effect is set, change the layout to the corresponding
  // layout that includes the keyboard mic.
  if ((device_info_.device.input.effects &
       media::AudioParameters::KEYBOARD_MIC) &&
      audio_constraints.GetGoogExperimentalNoiseSuppression()) {
    if (channel_layout == media::CHANNEL_LAYOUT_STEREO) {
      channel_layout = media::CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC;
      DVLOG(1) << "Changed stereo layout to stereo + keyboard mic layout due "
               << "to KEYBOARD_MIC effect.";
    } else {
      DVLOG(1) << "KEYBOARD_MIC effect ignored, not compatible with layout "
               << channel_layout;
    }
  }

  DVLOG(1) << "Audio input hardware channel layout: " << channel_layout;
  UMA_HISTOGRAM_ENUMERATION("WebRTC.AudioInputChannelLayout",
                            channel_layout, media::CHANNEL_LAYOUT_MAX + 1);

  // Verify that the reported input channel configuration is supported.
  if (channel_layout != media::CHANNEL_LAYOUT_MONO &&
      channel_layout != media::CHANNEL_LAYOUT_STEREO &&
      channel_layout != media::CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC) {
    DLOG(ERROR) << channel_layout
                << " is not a supported input channel configuration.";
    return false;
  }

  DVLOG(1) << "Audio input hardware sample rate: "
           << device_info_.device.input.sample_rate;
  media::AudioSampleRate asr;
  if (media::ToAudioSampleRate(device_info_.device.input.sample_rate, &asr)) {
    UMA_HISTOGRAM_ENUMERATION(
        "WebRTC.AudioInputSampleRate", asr, media::kAudioSampleRateMax + 1);
  } else {
    UMA_HISTOGRAM_COUNTS("WebRTC.AudioInputSampleRateUnexpected",
                         device_info_.device.input.sample_rate);
  }

  // Create and configure the default audio capturing source.
  SetCapturerSourceInternal(
      AudioDeviceFactory::NewAudioCapturerSource(render_frame_id_),
      channel_layout, device_info_.device.input.sample_rate);

  // Add the capturer to the WebRtcAudioDeviceImpl since it needs some hardware
  // information from the capturer.
  if (audio_device_)
    audio_device_->AddAudioCapturer(this);

  return true;
}

WebRtcAudioCapturer::WebRtcAudioCapturer(
    int render_frame_id,
    const StreamDeviceInfo& device_info,
    const blink::WebMediaConstraints& constraints,
    WebRtcAudioDeviceImpl* audio_device,
    MediaStreamAudioSource* audio_source)
    : constraints_(constraints),
      audio_processor_(new rtc::RefCountedObject<MediaStreamAudioProcessor>(
          constraints,
          device_info.device.input,
          audio_device)),
      running_(false),
      render_frame_id_(render_frame_id),
      device_info_(device_info),
      volume_(0),
      peer_connection_mode_(false),
      audio_device_(audio_device),
      audio_source_(audio_source) {
  DVLOG(1) << "WebRtcAudioCapturer::WebRtcAudioCapturer()";
}

WebRtcAudioCapturer::~WebRtcAudioCapturer() {
  DCHECK(thread_checker_.CalledOnValidThread());
  DCHECK(tracks_.IsEmpty());
  DVLOG(1) << "WebRtcAudioCapturer::~WebRtcAudioCapturer()";
  Stop();
}

void WebRtcAudioCapturer::AddTrack(WebRtcLocalAudioTrack* track) {
  DCHECK(thread_checker_.CalledOnValidThread());
  DCHECK(track);
  DVLOG(1) << "WebRtcAudioCapturer::AddTrack()";

  track->SetLevel(level_calculator_.level());

  // The track only grabs stats from the audio processor. Stats are only
  // available if audio processing is turned on. Therefore, only provide the
  // track a reference if audio processing is turned on.
  if (audio_processor_->has_audio_processing())
    track->SetAudioProcessor(audio_processor_);

  {
    base::AutoLock auto_lock(lock_);
    // Verify that |track| is not already added to the list.
    DCHECK(!tracks_.Contains(TrackOwner::TrackWrapper(track)));

    // Add with a tag, so we remember to call OnSetFormat() on the new
    // track.
    scoped_refptr<TrackOwner> track_owner(new TrackOwner(track));
    tracks_.AddAndTag(track_owner.get());
  }
}

void WebRtcAudioCapturer::RemoveTrack(WebRtcLocalAudioTrack* track) {
  DCHECK(thread_checker_.CalledOnValidThread());
  DVLOG(1) << "WebRtcAudioCapturer::RemoveTrack()";
  bool stop_source = false;
  {
    base::AutoLock auto_lock(lock_);

    scoped_refptr<TrackOwner> removed_item =
        tracks_.Remove(TrackOwner::TrackWrapper(track));

    // Clear the delegate to ensure that no more capture callbacks will
    // be sent to this sink. Also avoids a possible crash which can happen
    // if this method is called while capturing is active.
    if (removed_item.get()) {
      removed_item->Reset();
      stop_source = tracks_.IsEmpty();
    }
  }
  if (stop_source) {
    // Since WebRtcAudioCapturer does not inherit from MediaStreamAudioSource,
    // and instead MediaStreamAudioSource is composed of a WebRtcAudioCapturer,
    // we have to call StopSource on the MediaStreamSource. This will call
    // MediaStreamAudioSource::DoStopSource(), which in turn calls
    // WebRtcAudioCapturer::Stop().
    audio_source_->StopSource();
  }
}

void WebRtcAudioCapturer::SetCapturerSourceInternal(
    const scoped_refptr<media::AudioCapturerSource>& source,
    media::ChannelLayout channel_layout,
    int sample_rate) {
  DCHECK(thread_checker_.CalledOnValidThread());
  DVLOG(1) << "SetCapturerSource(channel_layout=" << channel_layout << ","
           << "sample_rate=" << sample_rate << ")";
  scoped_refptr<media::AudioCapturerSource> old_source;
  {
    base::AutoLock auto_lock(lock_);
    if (source_.get() == source.get())
      return;

    source_.swap(old_source);
    source_ = source;

    // Reset the flag to allow starting the new source.
    running_ = false;
  }

  DVLOG(1) << "Switching to a new capture source.";
  if (old_source.get())
    old_source->Stop();

  // Dispatch the new parameters both to the sink(s) and to the new source,
  // and also apply the new |constraints|.
  // The idea is to get rid of any dependency on the microphone parameters
  // which would normally be used by default.
  // bits_per_sample is always 16 for now.
  media::AudioParameters params(media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
                                channel_layout, sample_rate, 16,
                                GetBufferSize(sample_rate));
  params.set_effects(device_info_.device.input.effects);
  DCHECK(params.IsValid());

  {
    base::AutoLock auto_lock(lock_);

    // Notify the |audio_processor_| of the new format. We're doing this while
    // the lock is held only because the signaling thread might be calling
    // GetInputFormat(). Simultaneous reads from the audio thread are NOT the
    // concern here since the source is currently stopped (i.e., no audio
    // capture calls can be executing).
    audio_processor_->OnCaptureFormatChanged(params);

    // Notify all tracks about the new format.
    tracks_.TagAll();
  }

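  // |this| is registered as the capture callback, so the source delivers
  // recorded data through Capture() and reports failures through
  // OnCaptureError() below.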
  if (source.get())
    source->Initialize(params, this, device_info_.session_id);

  Start();
}

void WebRtcAudioCapturer::EnablePeerConnectionMode() {
  DCHECK(thread_checker_.CalledOnValidThread());
  DVLOG(1) << "EnablePeerConnectionMode";
  // Do nothing if the peer connection mode has been enabled.
  if (peer_connection_mode_)
    return;

  peer_connection_mode_ = true;
  int render_frame_id = -1;
  media::AudioParameters input_params;
  {
    base::AutoLock auto_lock(lock_);
    // Simply return if there is no existing source or the |render_frame_id_|
    // is not valid.
    if (!source_.get() || render_frame_id_ == -1)
      return;

    render_frame_id = render_frame_id_;
    input_params = audio_processor_->InputFormat();
  }

  // Do nothing if the current buffer size is the WebRtc native buffer size.
  if (GetBufferSize(input_params.sample_rate()) ==
      input_params.frames_per_buffer()) {
    return;
  }

  // Create a new audio stream as source which will open the hardware using
  // the WebRtc native buffer size.
  SetCapturerSourceInternal(
      AudioDeviceFactory::NewAudioCapturerSource(render_frame_id),
      input_params.channel_layout(), input_params.sample_rate());
}

void WebRtcAudioCapturer::Start() {
  DCHECK(thread_checker_.CalledOnValidThread());
  DVLOG(1) << "WebRtcAudioCapturer::Start()";
  base::AutoLock auto_lock(lock_);
  if (running_ || !source_.get())
    return;

  // Start the data source, i.e., start capturing data from the current source.
  // We need to set the AGC control before starting the stream.
  source_->SetAutomaticGainControl(true);
  source_->Start();
  running_ = true;
}

void WebRtcAudioCapturer::Stop() {
  DCHECK(thread_checker_.CalledOnValidThread());
  DVLOG(1) << "WebRtcAudioCapturer::Stop()";
  scoped_refptr<media::AudioCapturerSource> source;
  TrackList::ItemList tracks;
  {
    base::AutoLock auto_lock(lock_);
    if (!running_)
      return;

    source = source_;
    tracks = tracks_.Items();
    tracks_.Clear();
    running_ = false;
  }

  // Remove the capturer object from the WebRtcAudioDeviceImpl.
  if (audio_device_)
    audio_device_->RemoveAudioCapturer(this);

  for (TrackList::ItemList::const_iterator it = tracks.begin();
       it != tracks.end();
       ++it) {
    (*it)->Stop();
  }

  if (source.get())
    source->Stop();

  // Stop the audio processor to avoid feeding render data into the processor.
  audio_processor_->Stop();
}

void WebRtcAudioCapturer::SetVolume(int volume) {
  DVLOG(1) << "WebRtcAudioCapturer::SetVolume()";
  DCHECK_LE(volume, MaxVolume());
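  // Map the [0, MaxVolume()] level back onto the normalized [0.0, 1.0] scale
  // expected by the audio source (the inverse of the mapping in Capture()).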
  double normalized_volume = static_cast<double>(volume) / MaxVolume();
  base::AutoLock auto_lock(lock_);
  if (source_.get())
    source_->SetVolume(normalized_volume);
}

int WebRtcAudioCapturer::Volume() const {
  base::AutoLock auto_lock(lock_);
  return volume_;
}

int WebRtcAudioCapturer::MaxVolume() const {
  return WebRtcAudioDeviceImpl::kMaxVolumeLevel;
}

media::AudioParameters WebRtcAudioCapturer::GetOutputFormat() const {
  DCHECK(thread_checker_.CalledOnValidThread());
  return audio_processor_->OutputFormat();
}

void WebRtcAudioCapturer::Capture(const media::AudioBus* audio_source,
                                  int audio_delay_milliseconds,
                                  double volume,
                                  bool key_pressed) {
// This callback is driven by AudioInputDevice::AudioThreadCallback if
// |source_| is AudioInputDevice, otherwise it is driven by the client's
// CaptureCallback.
#if defined(OS_WIN) || defined(OS_MACOSX)
  DCHECK_LE(volume, 1.0);
#elif (defined(OS_LINUX) && !defined(OS_CHROMEOS)) || defined(OS_OPENBSD)
  // We have a special situation on Linux where the microphone volume can be
  // "higher than maximum". The input volume slider in the sound preferences
  // allows the user to set a scaling that is higher than 100%. It means that
  // even if the reported maximum level is N, the actual microphone level can
  // go up to 1.5*N and that corresponds to a normalized |volume| of 1.5.
  DCHECK_LE(volume, 1.6);
#endif

  // TODO(miu): Plumbing is needed to determine the actual capture timestamp
  // of the audio, instead of just snapshotting TimeTicks::Now(), for proper
  // audio/video sync. http://crbug.com/335335
  const base::TimeTicks reference_clock_snapshot = base::TimeTicks::Now();

  TrackList::ItemList tracks;
  TrackList::ItemList tracks_to_notify_format;
  int current_volume = 0;
  {
    base::AutoLock auto_lock(lock_);
    if (!running_)
      return;

    // Map internal volume range of [0.0, 1.0] into [0, 255] used by AGC.
    // The volume can be higher than 255 on Linux, and it will be cropped to
    // 255 since AGC does not allow values out of range.
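    // For example, a |volume| of 1.0 maps to 255, while a Linux |volume| of
    // 1.6 maps to 408 and is clamped to 255 below.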
    volume_ = static_cast<int>((volume * MaxVolume()) + 0.5);
    current_volume = volume_ > MaxVolume() ? MaxVolume() : volume_;
    tracks = tracks_.Items();
    tracks_.RetrieveAndClearTags(&tracks_to_notify_format);
  }

  // Sanity-check the input audio format in debug builds. Then, notify the
  // tracks if the format has changed.
  //
  // Locking is not needed here to read the audio input/output parameters
  // because the audio processor format changes only occur while audio capture
  // is stopped.
  DCHECK(audio_processor_->InputFormat().IsValid());
  DCHECK_EQ(audio_source->channels(),
            audio_processor_->InputFormat().channels());
  DCHECK_EQ(audio_source->frames(),
            audio_processor_->InputFormat().frames_per_buffer());
  if (!tracks_to_notify_format.empty()) {
    const media::AudioParameters& output_params =
        audio_processor_->OutputFormat();
    for (const auto& track : tracks_to_notify_format)
      track->OnSetFormat(output_params);
  }

  // Figure out if the pre-processed data has any energy or not. This
  // information will be passed to the level calculator to force it to report
  // energy in case the post-processed data is zeroed by the audio processing.
  const bool force_report_nonzero_energy = !audio_source->AreFramesZero();

  // Push the data to the processor for processing.
  audio_processor_->PushCaptureData(
      *audio_source,
      base::TimeDelta::FromMilliseconds(audio_delay_milliseconds));

  // Process and consume the data in the processor until the processor no
  // longer has enough data to produce another output buffer.
  media::AudioBus* processed_data = nullptr;
  base::TimeDelta processed_data_audio_delay;
  int new_volume = 0;
  while (audio_processor_->ProcessAndConsumeData(
             current_volume, key_pressed,
             &processed_data, &processed_data_audio_delay, &new_volume)) {
    DCHECK(processed_data);

    level_calculator_.Calculate(*processed_data, force_report_nonzero_energy);

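    // Estimate the capture time of this chunk by subtracting the delay
    // reported by the audio processor from the reference clock snapshot
    // taken above.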
    const base::TimeTicks processed_data_capture_time =
        reference_clock_snapshot - processed_data_audio_delay;
    for (const auto& track : tracks)
      track->Capture(*processed_data, processed_data_capture_time);

    if (new_volume) {
      SetVolume(new_volume);

      // Update the |current_volume| to avoid passing the old volume to AGC.
      current_volume = new_volume;
    }
  }
}

void WebRtcAudioCapturer::OnCaptureError(const std::string& message) {
  WebRtcLogMessage("WAC::OnCaptureError: " + message);
}

media::AudioParameters WebRtcAudioCapturer::GetInputFormat() const {
  base::AutoLock auto_lock(lock_);
  return audio_processor_->InputFormat();
}

int WebRtcAudioCapturer::GetBufferSize(int sample_rate) const {
  DCHECK(thread_checker_.CalledOnValidThread());
#if defined(OS_ANDROID)
  // TODO(henrika): Tune and adjust buffer size on Android.
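  // This is a 20 ms buffer, i.e., twice the 10 ms WebRTC processing unit
  // used below.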
  return (2 * sample_rate / 100);
#endif

  // PeerConnection processes audio in 10 ms chunks, so a buffer size that is
  // a multiple of 10 ms gives the best performance for PeerConnection.
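  // For example, at 48000 Hz this works out to 480 frames per buffer.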
  int peer_connection_buffer_size = sample_rate / 100;

  // Use the native hardware buffer size in non peer connection mode when the
  // platform is using a native buffer size smaller than the PeerConnection
  // buffer size and audio processing is off.
  int hardware_buffer_size = device_info_.device.input.frames_per_buffer;
  if (!peer_connection_mode_ && hardware_buffer_size &&
      hardware_buffer_size <= peer_connection_buffer_size &&
      !audio_processor_->has_audio_processing()) {
    DVLOG(1) << "WebRtcAudioCapturer is using hardware buffer size "
             << hardware_buffer_size;
    return hardware_buffer_size;
  }

  return (sample_rate / 100);
}

void WebRtcAudioCapturer::SetCapturerSource(
    const scoped_refptr<media::AudioCapturerSource>& source,
    media::AudioParameters params) {
  // Create a new audio stream as source which uses the new source.
  SetCapturerSourceInternal(source, params.channel_layout(),
                            params.sample_rate());
}

}  // namespace content