OLD | NEW |
| (Empty) |
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | |
2 // Use of this source code is governed by a BSD-style license that can be | |
3 // found in the LICENSE file. | |
4 | |
5 #include "content/renderer/media/webrtc_audio_capturer.h" | |
6 | |
7 #include "base/bind.h" | |
8 #include "base/logging.h" | |
9 #include "base/macros.h" | |
10 #include "base/metrics/histogram.h" | |
11 #include "base/strings/string_util.h" | |
12 #include "base/strings/stringprintf.h" | |
13 #include "build/build_config.h" | |
14 #include "content/child/child_process.h" | |
15 #include "content/renderer/media/audio_device_factory.h" | |
16 #include "content/renderer/media/media_stream_audio_processor.h" | |
17 #include "content/renderer/media/media_stream_audio_processor_options.h" | |
18 #include "content/renderer/media/media_stream_audio_source.h" | |
19 #include "content/renderer/media/media_stream_constraints_util.h" | |
20 #include "content/renderer/media/webrtc_audio_device_impl.h" | |
21 #include "content/renderer/media/webrtc_local_audio_track.h" | |
22 #include "content/renderer/media/webrtc_logging.h" | |
23 #include "media/audio/sample_rates.h" | |
24 | |
25 namespace content { | |
26 | |
27 namespace { | |
28 | |
// Name of the optional constraint that specifies the audio buffer size,
// expressed in milliseconds of latency.
const char kAudioLatency[] = "latencyMs";
// Accepted range for the latency constraint; values outside this range are
// ignored (see Initialize()).
const int kMinAudioLatencyMs = 0;
const int kMaxAudioLatencyMs = 10000;
33 | |
34 // Method to check if any of the data in |audio_source| has energy. | |
35 bool HasDataEnergy(const media::AudioBus& audio_source) { | |
36 for (int ch = 0; ch < audio_source.channels(); ++ch) { | |
37 const float* channel_ptr = audio_source.channel(ch); | |
38 for (int frame = 0; frame < audio_source.frames(); ++frame) { | |
39 if (channel_ptr[frame] != 0) | |
40 return true; | |
41 } | |
42 } | |
43 | |
44 // All the data is zero. | |
45 return false; | |
46 } | |
47 | |
48 } // namespace | |
49 | |
50 // Reference counted container of WebRtcLocalAudioTrack delegate. | |
51 // TODO(xians): Switch to MediaStreamAudioSinkOwner. | |
52 class WebRtcAudioCapturer::TrackOwner | |
53 : public base::RefCountedThreadSafe<WebRtcAudioCapturer::TrackOwner> { | |
54 public: | |
55 explicit TrackOwner(WebRtcLocalAudioTrack* track) | |
56 : delegate_(track) {} | |
57 | |
58 void Capture(const media::AudioBus& audio_bus, | |
59 base::TimeTicks estimated_capture_time, | |
60 bool force_report_nonzero_energy) { | |
61 base::AutoLock lock(lock_); | |
62 if (delegate_) { | |
63 delegate_->Capture(audio_bus, | |
64 estimated_capture_time, | |
65 force_report_nonzero_energy); | |
66 } | |
67 } | |
68 | |
69 void OnSetFormat(const media::AudioParameters& params) { | |
70 base::AutoLock lock(lock_); | |
71 if (delegate_) | |
72 delegate_->OnSetFormat(params); | |
73 } | |
74 | |
75 void SetAudioProcessor( | |
76 const scoped_refptr<MediaStreamAudioProcessor>& processor) { | |
77 base::AutoLock lock(lock_); | |
78 if (delegate_) | |
79 delegate_->SetAudioProcessor(processor); | |
80 } | |
81 | |
82 void Reset() { | |
83 base::AutoLock lock(lock_); | |
84 delegate_ = NULL; | |
85 } | |
86 | |
87 void Stop() { | |
88 base::AutoLock lock(lock_); | |
89 DCHECK(delegate_); | |
90 | |
91 // This can be reentrant so reset |delegate_| before calling out. | |
92 WebRtcLocalAudioTrack* temp = delegate_; | |
93 delegate_ = NULL; | |
94 temp->Stop(); | |
95 } | |
96 | |
97 // Wrapper which allows to use std::find_if() when adding and removing | |
98 // sinks to/from the list. | |
99 struct TrackWrapper { | |
100 explicit TrackWrapper(WebRtcLocalAudioTrack* track) : track_(track) {} | |
101 bool operator()( | |
102 const scoped_refptr<WebRtcAudioCapturer::TrackOwner>& owner) const { | |
103 return owner->IsEqual(track_); | |
104 } | |
105 WebRtcLocalAudioTrack* track_; | |
106 }; | |
107 | |
108 protected: | |
109 virtual ~TrackOwner() {} | |
110 | |
111 private: | |
112 friend class base::RefCountedThreadSafe<WebRtcAudioCapturer::TrackOwner>; | |
113 | |
114 bool IsEqual(const WebRtcLocalAudioTrack* other) const { | |
115 base::AutoLock lock(lock_); | |
116 return (other == delegate_); | |
117 } | |
118 | |
119 // Do NOT reference count the |delegate_| to avoid cyclic reference counting. | |
120 WebRtcLocalAudioTrack* delegate_; | |
121 mutable base::Lock lock_; | |
122 | |
123 DISALLOW_COPY_AND_ASSIGN(TrackOwner); | |
124 }; | |
125 | |
126 // static | |
127 scoped_refptr<WebRtcAudioCapturer> WebRtcAudioCapturer::CreateCapturer( | |
128 int render_frame_id, | |
129 const StreamDeviceInfo& device_info, | |
130 const blink::WebMediaConstraints& constraints, | |
131 WebRtcAudioDeviceImpl* audio_device, | |
132 MediaStreamAudioSource* audio_source) { | |
133 scoped_refptr<WebRtcAudioCapturer> capturer = new WebRtcAudioCapturer( | |
134 render_frame_id, device_info, constraints, audio_device, audio_source); | |
135 if (capturer->Initialize()) | |
136 return capturer; | |
137 | |
138 return NULL; | |
139 } | |
140 | |
// One-time setup after construction: logs the device configuration,
// validates the constraints and the reported hardware channel layout,
// records UMA metrics, applies the optional latency constraint, and creates
// the default capture source. Returns false on invalid constraints or an
// unsupported channel layout.
bool WebRtcAudioCapturer::Initialize() {
  DCHECK(thread_checker_.CalledOnValidThread());
  DVLOG(1) << "WebRtcAudioCapturer::Initialize()";
  // Log the full input/matched-output configuration for WebRTC diagnostics.
  WebRtcLogMessage(base::StringPrintf(
      "WAC::Initialize. render_frame_id=%d"
      ", channel_layout=%d, sample_rate=%d, buffer_size=%d"
      ", session_id=%d, paired_output_sample_rate=%d"
      ", paired_output_frames_per_buffer=%d, effects=%d. ",
      render_frame_id_, device_info_.device.input.channel_layout,
      device_info_.device.input.sample_rate,
      device_info_.device.input.frames_per_buffer, device_info_.session_id,
      device_info_.device.matched_output.sample_rate,
      device_info_.device.matched_output.frames_per_buffer,
      device_info_.device.input.effects));

  if (render_frame_id_ == -1) {
    // Return true here to allow injecting a new source via
    // SetCapturerSourceForTesting() at a later state.
    return true;
  }

  // Fail fast if the audio constraints cannot be satisfied.
  MediaAudioConstraints audio_constraints(constraints_,
                                          device_info_.device.input.effects);
  if (!audio_constraints.IsValid())
    return false;

  media::ChannelLayout channel_layout = static_cast<media::ChannelLayout>(
      device_info_.device.input.channel_layout);

  // If KEYBOARD_MIC effect is set, change the layout to the corresponding
  // layout that includes the keyboard mic.
  if ((device_info_.device.input.effects &
          media::AudioParameters::KEYBOARD_MIC) &&
      audio_constraints.GetProperty(
          MediaAudioConstraints::kGoogExperimentalNoiseSuppression)) {
    if (channel_layout == media::CHANNEL_LAYOUT_STEREO) {
      channel_layout = media::CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC;
      DVLOG(1) << "Changed stereo layout to stereo + keyboard mic layout due "
               << "to KEYBOARD_MIC effect.";
    } else {
      DVLOG(1) << "KEYBOARD_MIC effect ignored, not compatible with layout "
               << channel_layout;
    }
  }

  DVLOG(1) << "Audio input hardware channel layout: " << channel_layout;
  UMA_HISTOGRAM_ENUMERATION("WebRTC.AudioInputChannelLayout",
                            channel_layout, media::CHANNEL_LAYOUT_MAX + 1);

  // Verify that the reported input channel configuration is supported.
  if (channel_layout != media::CHANNEL_LAYOUT_MONO &&
      channel_layout != media::CHANNEL_LAYOUT_STEREO &&
      channel_layout != media::CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC) {
    DLOG(ERROR) << channel_layout
                << " is not a supported input channel configuration.";
    return false;
  }

  DVLOG(1) << "Audio input hardware sample rate: "
           << device_info_.device.input.sample_rate;
  // Known sample rates go into an enumerated histogram; anything else is
  // recorded separately as an unexpected rate.
  media::AudioSampleRate asr;
  if (media::ToAudioSampleRate(device_info_.device.input.sample_rate, &asr)) {
    UMA_HISTOGRAM_ENUMERATION(
        "WebRTC.AudioInputSampleRate", asr, media::kAudioSampleRateMax + 1);
  } else {
    UMA_HISTOGRAM_COUNTS("WebRTC.AudioInputSampleRateUnexpected",
                         device_info_.device.input.sample_rate);
  }

  // Initialize the buffer size to zero, which means it wasn't specified.
  // If it is out of range, we return it to zero.
  int buffer_size_ms = 0;
  int buffer_size_samples = 0;
  GetConstraintValueAsInteger(constraints_, kAudioLatency, &buffer_size_ms);
  if (buffer_size_ms < kMinAudioLatencyMs ||
      buffer_size_ms > kMaxAudioLatencyMs) {
    DVLOG(1) << "Ignoring out of range buffer size " << buffer_size_ms;
  } else {
    // Convert the requested latency in milliseconds to a frame count.
    buffer_size_samples =
        device_info_.device.input.sample_rate * buffer_size_ms / 1000;
  }
  DVLOG_IF(1, buffer_size_samples > 0)
      << "Custom audio buffer size: " << buffer_size_samples << " samples";

  // Create and configure the default audio capturing source.
  SetCapturerSourceInternal(
      AudioDeviceFactory::NewInputDevice(render_frame_id_),
      channel_layout,
      device_info_.device.input.sample_rate,
      buffer_size_samples);

  // Add the capturer to the WebRtcAudioDeviceImpl since it needs some hardware
  // information from the capturer.
  if (audio_device_)
    audio_device_->AddAudioCapturer(this);

  return true;
}
239 | |
// Constructs a capturer for |render_frame_id| using the given device info
// and constraints. |audio_device| may be NULL (it is null-checked before
// use); |audio_source| is the owning MediaStreamAudioSource used later to
// stop the source when the last track is removed. Call Initialize() after
// construction (see CreateCapturer()).
WebRtcAudioCapturer::WebRtcAudioCapturer(
    int render_frame_id,
    const StreamDeviceInfo& device_info,
    const blink::WebMediaConstraints& constraints,
    WebRtcAudioDeviceImpl* audio_device,
    MediaStreamAudioSource* audio_source)
    : constraints_(constraints),
      // The audio processor is created eagerly from the constraints and the
      // input device parameters.
      audio_processor_(new rtc::RefCountedObject<MediaStreamAudioProcessor>(
          constraints,
          device_info.device.input,
          audio_device)),
      running_(false),
      render_frame_id_(render_frame_id),
      device_info_(device_info),
      volume_(0),
      peer_connection_mode_(false),
      audio_device_(audio_device),
      audio_source_(audio_source) {
  DVLOG(1) << "WebRtcAudioCapturer::WebRtcAudioCapturer()";
}
260 | |
// All tracks must already have been removed (see RemoveTrack()); Stop()
// performs the remaining teardown and returns early if capturing has
// already been stopped.
WebRtcAudioCapturer::~WebRtcAudioCapturer() {
  DCHECK(thread_checker_.CalledOnValidThread());
  DCHECK(tracks_.IsEmpty());
  DVLOG(1) << "WebRtcAudioCapturer::~WebRtcAudioCapturer()";
  Stop();
}
267 | |
268 void WebRtcAudioCapturer::AddTrack(WebRtcLocalAudioTrack* track) { | |
269 DCHECK(track); | |
270 DVLOG(1) << "WebRtcAudioCapturer::AddTrack()"; | |
271 | |
272 { | |
273 base::AutoLock auto_lock(lock_); | |
274 // Verify that |track| is not already added to the list. | |
275 DCHECK(!tracks_.Contains(TrackOwner::TrackWrapper(track))); | |
276 | |
277 // Add with a tag, so we remember to call OnSetFormat() on the new | |
278 // track. | |
279 scoped_refptr<TrackOwner> track_owner(new TrackOwner(track)); | |
280 tracks_.AddAndTag(track_owner.get()); | |
281 } | |
282 } | |
283 | |
284 void WebRtcAudioCapturer::RemoveTrack(WebRtcLocalAudioTrack* track) { | |
285 DCHECK(thread_checker_.CalledOnValidThread()); | |
286 DVLOG(1) << "WebRtcAudioCapturer::RemoveTrack()"; | |
287 bool stop_source = false; | |
288 { | |
289 base::AutoLock auto_lock(lock_); | |
290 | |
291 scoped_refptr<TrackOwner> removed_item = | |
292 tracks_.Remove(TrackOwner::TrackWrapper(track)); | |
293 | |
294 // Clear the delegate to ensure that no more capture callbacks will | |
295 // be sent to this sink. Also avoids a possible crash which can happen | |
296 // if this method is called while capturing is active. | |
297 if (removed_item.get()) { | |
298 removed_item->Reset(); | |
299 stop_source = tracks_.IsEmpty(); | |
300 } | |
301 } | |
302 if (stop_source) { | |
303 // Since WebRtcAudioCapturer does not inherit MediaStreamAudioSource, | |
304 // and instead MediaStreamAudioSource is composed of a WebRtcAudioCapturer, | |
305 // we have to call StopSource on the MediaStreamSource. This will call | |
306 // MediaStreamAudioSource::DoStopSource which in turn call | |
307 // WebRtcAudioCapturerer::Stop(); | |
308 audio_source_->StopSource(); | |
309 } | |
310 } | |
311 | |
// Swaps in |source| as the capture source and (re)initializes it with the
// given format. A |buffer_size| of 0 means "unspecified" and a default is
// chosen via GetBufferSize(). The previous source, if different, is stopped
// outside of |lock_|. No-op when |source| is already the current source.
void WebRtcAudioCapturer::SetCapturerSourceInternal(
    const scoped_refptr<media::AudioCapturerSource>& source,
    media::ChannelLayout channel_layout,
    int sample_rate,
    int buffer_size) {
  DCHECK(thread_checker_.CalledOnValidThread());
  DVLOG(1) << "SetCapturerSource(channel_layout=" << channel_layout << ","
           << "sample_rate=" << sample_rate << ")";
  scoped_refptr<media::AudioCapturerSource> old_source;
  {
    base::AutoLock auto_lock(lock_);
    // Nothing to do if the same source is set again.
    if (source_.get() == source.get())
      return;

    source_.swap(old_source);
    source_ = source;

    // Reset the flag to allow starting the new source.
    running_ = false;
  }

  DVLOG(1) << "Switching to a new capture source.";
  // Stop the previous source outside of |lock_|.
  if (old_source.get())
    old_source->Stop();

  // If the buffer size is zero, it has not been specified.
  // We either default to 10ms, or use the hardware buffer size.
  if (buffer_size == 0)
    buffer_size = GetBufferSize(sample_rate);

  // Dispatch the new parameters both to the sink(s) and to the new source,
  // also apply the new |constraints|.
  // The idea is to get rid of any dependency of the microphone parameters
  // which would normally be used by default.
  // bits_per_sample is always 16 for now.
  media::AudioParameters params(media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
                                channel_layout, sample_rate, 16, buffer_size);
  params.set_effects(device_info_.device.input.effects);

  {
    base::AutoLock auto_lock(lock_);
    // Notify the |audio_processor_| of the new format.
    audio_processor_->OnCaptureFormatChanged(params);

    // Notify all tracks about the new format.
    tracks_.TagAll();
  }

  if (source.get())
    source->Initialize(params, this, session_id());

  Start();
}
365 | |
// Switches the capturer into peer connection mode. If an existing source is
// running with a buffer size different from the WebRTC native size (see
// GetBufferSize()), it is replaced by a new source opened with the native
// buffer size (buffer_size argument 0 below).
void WebRtcAudioCapturer::EnablePeerConnectionMode() {
  DCHECK(thread_checker_.CalledOnValidThread());
  DVLOG(1) << "EnablePeerConnectionMode";
  // Do nothing if the peer connection mode has been enabled.
  if (peer_connection_mode_)
    return;

  peer_connection_mode_ = true;
  int render_frame_id = -1;
  media::AudioParameters input_params;
  {
    base::AutoLock auto_lock(lock_);
    // Simply return if there is no existing source or the |render_frame_id_| is
    // not valid.
    if (!source_.get() || render_frame_id_ == -1)
      return;

    // Take copies under |lock_|; the remaining work happens outside it.
    render_frame_id = render_frame_id_;
    input_params = audio_processor_->InputFormat();
  }

  // Do nothing if the current buffer size is the WebRtc native buffer size.
  if (GetBufferSize(input_params.sample_rate()) ==
          input_params.frames_per_buffer()) {
    return;
  }

  // Create a new audio stream as source which will open the hardware using
  // WebRtc native buffer size.
  SetCapturerSourceInternal(AudioDeviceFactory::NewInputDevice(render_frame_id),
                            input_params.channel_layout(),
                            input_params.sample_rate(),
                            0);
}
400 | |
401 void WebRtcAudioCapturer::Start() { | |
402 DCHECK(thread_checker_.CalledOnValidThread()); | |
403 DVLOG(1) << "WebRtcAudioCapturer::Start()"; | |
404 base::AutoLock auto_lock(lock_); | |
405 if (running_ || !source_.get()) | |
406 return; | |
407 | |
408 // Start the data source, i.e., start capturing data from the current source. | |
409 // We need to set the AGC control before starting the stream. | |
410 source_->SetAutomaticGainControl(true); | |
411 source_->Start(); | |
412 running_ = true; | |
413 } | |
414 | |
// Stops capturing: unregisters from the WebRtcAudioDeviceImpl, stops every
// registered track, stops the source, and finally stops the audio
// processor. Returns early when not running, so repeated calls are safe.
void WebRtcAudioCapturer::Stop() {
  DCHECK(thread_checker_.CalledOnValidThread());
  DVLOG(1) << "WebRtcAudioCapturer::Stop()";
  scoped_refptr<media::AudioCapturerSource> source;
  TrackList::ItemList tracks;
  {
    base::AutoLock auto_lock(lock_);
    if (!running_)
      return;

    // Take local copies so the shutdown below runs without holding |lock_|.
    source = source_;
    tracks = tracks_.Items();
    tracks_.Clear();
    running_ = false;
  }

  // Remove the capturer object from the WebRtcAudioDeviceImpl.
  if (audio_device_)
    audio_device_->RemoveAudioCapturer(this);

  for (TrackList::ItemList::const_iterator it = tracks.begin();
       it != tracks.end();
       ++it) {
    (*it)->Stop();
  }

  if (source.get())
    source->Stop();

  // Stop the audio processor to avoid feeding render data into the processor.
  audio_processor_->Stop();
}
447 | |
448 void WebRtcAudioCapturer::SetVolume(int volume) { | |
449 DVLOG(1) << "WebRtcAudioCapturer::SetVolume()"; | |
450 DCHECK_LE(volume, MaxVolume()); | |
451 double normalized_volume = static_cast<double>(volume) / MaxVolume(); | |
452 base::AutoLock auto_lock(lock_); | |
453 if (source_.get()) | |
454 source_->SetVolume(normalized_volume); | |
455 } | |
456 | |
// Returns the last capture volume recorded by Capture(), mapped into the
// [0, 255] AGC range. Note that the stored value can exceed MaxVolume() on
// Linux (see the comment in Capture()).
int WebRtcAudioCapturer::Volume() const {
  base::AutoLock auto_lock(lock_);
  return volume_;
}
461 | |
// Maximum volume level used for the AGC volume mapping.
int WebRtcAudioCapturer::MaxVolume() const {
  return WebRtcAudioDeviceImpl::kMaxVolumeLevel;
}
465 | |
// Returns the format of the audio delivered to the tracks, i.e. the audio
// processor's output format.
media::AudioParameters WebRtcAudioCapturer::GetOutputFormat() const {
  DCHECK(thread_checker_.CalledOnValidThread());
  return audio_processor_->OutputFormat();
}
470 | |
// Capture callback invoked with each new buffer of input audio. Mirrors the
// capture volume into |volume_| for the AGC, notifies newly tagged tracks of
// the output format, pushes the data through |audio_processor_|, and
// delivers every processed buffer to all registered tracks.
void WebRtcAudioCapturer::Capture(const media::AudioBus* audio_source,
                                  int audio_delay_milliseconds,
                                  double volume,
                                  bool key_pressed) {
// This callback is driven by AudioInputDevice::AudioThreadCallback if
// |source_| is AudioInputDevice, otherwise it is driven by client's
// CaptureCallback.
#if defined(OS_WIN) || defined(OS_MACOSX)
  DCHECK_LE(volume, 1.0);
#elif (defined(OS_LINUX) && !defined(OS_CHROMEOS)) || defined(OS_OPENBSD)
  // We have a special situation on Linux where the microphone volume can be
  // "higher than maximum". The input volume slider in the sound preference
  // allows the user to set a scaling that is higher than 100%. It means that
  // even if the reported maximum levels is N, the actual microphone level can
  // go up to 1.5x*N and that corresponds to a normalized |volume| of 1.5x.
  DCHECK_LE(volume, 1.6);
#endif

  // TODO(miu): Plumbing is needed to determine the actual capture timestamp
  // of the audio, instead of just snapshotting TimeTicks::Now(), for proper
  // audio/video sync. http://crbug.com/335335
  const base::TimeTicks reference_clock_snapshot = base::TimeTicks::Now();

  TrackList::ItemList tracks;
  TrackList::ItemList tracks_to_notify_format;
  int current_volume = 0;
  {
    base::AutoLock auto_lock(lock_);
    if (!running_)
      return;

    // Map internal volume range of [0.0, 1.0] into [0, 255] used by AGC.
    // The volume can be higher than 255 on Linux, and it will be cropped to
    // 255 since AGC does not allow values out of range.
    volume_ = static_cast<int>((volume * MaxVolume()) + 0.5);
    current_volume = volume_ > MaxVolume() ? MaxVolume() : volume_;
    tracks = tracks_.Items();
    // Tracks added since the last callback still need an OnSetFormat() call.
    tracks_.RetrieveAndClearTags(&tracks_to_notify_format);
  }

  DCHECK(audio_processor_->InputFormat().IsValid());
  DCHECK_EQ(audio_source->channels(),
            audio_processor_->InputFormat().channels());
  DCHECK_EQ(audio_source->frames(),
            audio_processor_->InputFormat().frames_per_buffer());

  // Notify the tracks on when the format changes. This will do nothing if
  // |tracks_to_notify_format| is empty.
  const media::AudioParameters& output_params =
      audio_processor_->OutputFormat();
  for (const auto& track : tracks_to_notify_format) {
    track->OnSetFormat(output_params);
    track->SetAudioProcessor(audio_processor_);
  }

  // Figure out if the pre-processed data has any energy or not, the
  // information will be passed to the track to force the calculator
  // to report energy in case the post-processed data is zeroed by the audio
  // processing.
  const bool force_report_nonzero_energy = HasDataEnergy(*audio_source);

  // Push the data to the processor for processing.
  audio_processor_->PushCaptureData(
      *audio_source,
      base::TimeDelta::FromMilliseconds(audio_delay_milliseconds));

  // Process and consume the data in the processor until there is not enough
  // data in the processor.
  media::AudioBus* processed_data = nullptr;
  base::TimeDelta processed_data_audio_delay;
  int new_volume = 0;
  while (audio_processor_->ProcessAndConsumeData(
      current_volume, key_pressed,
      &processed_data, &processed_data_audio_delay, &new_volume)) {
    DCHECK(processed_data);
    // Estimate when this chunk was captured by subtracting its reported
    // delay from the clock snapshot taken above.
    const base::TimeTicks processed_data_capture_time =
        reference_clock_snapshot - processed_data_audio_delay;
    for (const auto& track : tracks) {
      track->Capture(*processed_data,
                     processed_data_capture_time,
                     force_report_nonzero_energy);
    }

    if (new_volume) {
      SetVolume(new_volume);

      // Update the |current_volume| to avoid passing the old volume to AGC.
      current_volume = new_volume;
    }
  }
}
562 | |
// Called by the audio source when a capture error occurs; forwards the
// message to the WebRTC log.
void WebRtcAudioCapturer::OnCaptureError(const std::string& message) {
  WebRtcLogMessage("WAC::OnCaptureError: " + message);
}
566 | |
567 media::AudioParameters WebRtcAudioCapturer::source_audio_parameters() const { | |
568 base::AutoLock auto_lock(lock_); | |
569 return audio_processor_.get() ? audio_processor_->InputFormat() | |
570 : media::AudioParameters(); | |
571 } | |
572 | |
573 bool WebRtcAudioCapturer::GetPairedOutputParameters( | |
574 int* session_id, | |
575 int* output_sample_rate, | |
576 int* output_frames_per_buffer) const { | |
577 // Don't set output parameters unless all of them are valid. | |
578 if (device_info_.session_id <= 0 || | |
579 !device_info_.device.matched_output.sample_rate || | |
580 !device_info_.device.matched_output.frames_per_buffer) | |
581 return false; | |
582 | |
583 *session_id = device_info_.session_id; | |
584 *output_sample_rate = device_info_.device.matched_output.sample_rate; | |
585 *output_frames_per_buffer = | |
586 device_info_.device.matched_output.frames_per_buffer; | |
587 | |
588 return true; | |
589 } | |
590 | |
591 int WebRtcAudioCapturer::GetBufferSize(int sample_rate) const { | |
592 DCHECK(thread_checker_.CalledOnValidThread()); | |
593 #if defined(OS_ANDROID) | |
594 // TODO(henrika): Tune and adjust buffer size on Android. | |
595 return (2 * sample_rate / 100); | |
596 #endif | |
597 | |
598 // PeerConnection is running at a buffer size of 10ms data. A multiple of | |
599 // 10ms as the buffer size can give the best performance to PeerConnection. | |
600 int peer_connection_buffer_size = sample_rate / 100; | |
601 | |
602 // Use the native hardware buffer size in non peer connection mode when the | |
603 // platform is using a native buffer size smaller than the PeerConnection | |
604 // buffer size and audio processing is off. | |
605 int hardware_buffer_size = device_info_.device.input.frames_per_buffer; | |
606 if (!peer_connection_mode_ && hardware_buffer_size && | |
607 hardware_buffer_size <= peer_connection_buffer_size && | |
608 !audio_processor_->has_audio_processing()) { | |
609 DVLOG(1) << "WebRtcAudioCapturer is using hardware buffer size " | |
610 << hardware_buffer_size; | |
611 return hardware_buffer_size; | |
612 } | |
613 | |
614 return (sample_rate / 100); | |
615 } | |
616 | |
617 void WebRtcAudioCapturer::SetCapturerSource( | |
618 const scoped_refptr<media::AudioCapturerSource>& source, | |
619 media::AudioParameters params) { | |
620 // Create a new audio stream as source which uses the new source. | |
621 SetCapturerSourceInternal(source, | |
622 params.channel_layout(), | |
623 params.sample_rate(), | |
624 0); | |
625 } | |
626 | |
627 } // namespace content | |
OLD | NEW |