OLD | NEW |
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "content/renderer/media/webrtc_audio_capturer.h" | 5 #include "content/renderer/media/webrtc_audio_capturer.h" |
6 | 6 |
7 #include "base/bind.h" | 7 #include "base/bind.h" |
8 #include "base/logging.h" | 8 #include "base/logging.h" |
9 #include "base/metrics/histogram.h" | 9 #include "base/metrics/histogram.h" |
10 #include "base/strings/string_util.h" | 10 #include "base/strings/string_util.h" |
11 #include "content/child/child_process.h" | 11 #include "content/child/child_process.h" |
12 #include "content/renderer/media/audio_device_factory.h" | 12 #include "content/renderer/media/audio_device_factory.h" |
13 #include "content/renderer/media/webrtc_audio_device_impl.h" | 13 #include "content/renderer/media/webrtc_audio_device_impl.h" |
| 14 #include "content/renderer/media/webrtc_audio_processing_wrapper.h" |
14 #include "content/renderer/media/webrtc_local_audio_track.h" | 15 #include "content/renderer/media/webrtc_local_audio_track.h" |
15 #include "media/audio/sample_rates.h" | 16 #include "media/audio/sample_rates.h" |
16 | 17 |
17 namespace content { | 18 namespace content { |
18 | 19 |
19 namespace { | 20 namespace { |
20 | 21 |
21 // Supported hardware sample rates for input and output sides. | 22 // Supported hardware sample rates for input and output sides. |
22 #if defined(OS_WIN) || defined(OS_MACOSX) | 23 #if defined(OS_WIN) || defined(OS_MACOSX) |
23 // media::GetAudioInputHardwareSampleRate() asks the audio layer | 24 // media::GetAudioInputHardwareSampleRate() asks the audio layer |
(...skipping 11 matching lines...) |
35 | 36 |
36 } // namespace | 37 } // namespace |
37 | 38 |
38 // Reference counted container of WebRtcLocalAudioTrack delegate. | 39 // Reference counted container of WebRtcLocalAudioTrack delegate. |
39 class WebRtcAudioCapturer::TrackOwner | 40 class WebRtcAudioCapturer::TrackOwner |
40 : public base::RefCountedThreadSafe<WebRtcAudioCapturer::TrackOwner> { | 41 : public base::RefCountedThreadSafe<WebRtcAudioCapturer::TrackOwner> { |
41 public: | 42 public: |
42 explicit TrackOwner(WebRtcLocalAudioTrack* track) | 43 explicit TrackOwner(WebRtcLocalAudioTrack* track) |
43 : delegate_(track) {} | 44 : delegate_(track) {} |
44 | 45 |
45 void Capture(media::AudioBus* audio_source, | 46 void Capture(const int16* data, |
46 int audio_delay_milliseconds, | 47 int sample_rate, |
47 double volume, | 48 int number_of_channels, |
48 bool key_pressed) { | 49 int number_of_frames) { |
49 base::AutoLock lock(lock_); | 50 base::AutoLock lock(lock_); |
50 if (delegate_) { | 51 if (delegate_) { |
51 delegate_->Capture(audio_source, | 52 delegate_->Capture(data, |
52 audio_delay_milliseconds, | 53 sample_rate, |
53 volume, | 54 number_of_channels, |
54 key_pressed); | 55 number_of_frames); |
55 } | 56 } |
56 } | 57 } |
57 | 58 |
58 void SetCaptureFormat(const media::AudioParameters& params) { | 59 void SetCaptureFormat(const media::AudioParameters& params) { |
59 base::AutoLock lock(lock_); | 60 base::AutoLock lock(lock_); |
60 if (delegate_) | 61 if (delegate_) |
61 delegate_->SetCaptureFormat(params); | 62 delegate_->SetCaptureFormat(params); |
62 } | 63 } |
63 | 64 |
64 void Reset() { | 65 void Reset() { |
(...skipping 29 matching lines...) |
94 | 95 |
95 DISALLOW_COPY_AND_ASSIGN(TrackOwner); | 96 DISALLOW_COPY_AND_ASSIGN(TrackOwner); |
96 }; | 97 }; |
97 | 98 |
98 // static | 99 // static |
99 scoped_refptr<WebRtcAudioCapturer> WebRtcAudioCapturer::CreateCapturer() { | 100 scoped_refptr<WebRtcAudioCapturer> WebRtcAudioCapturer::CreateCapturer() { |
100 scoped_refptr<WebRtcAudioCapturer> capturer = new WebRtcAudioCapturer(); | 101 scoped_refptr<WebRtcAudioCapturer> capturer = new WebRtcAudioCapturer(); |
101 return capturer; | 102 return capturer; |
102 } | 103 } |
103 | 104 |
104 void WebRtcAudioCapturer::Reconfigure(int sample_rate, | 105 void WebRtcAudioCapturer::Reconfigure( |
105 media::ChannelLayout channel_layout) { | 106 int sample_rate, |
| 107 media::ChannelLayout channel_layout, |
| 108 const webrtc::MediaConstraintsInterface* constraints) { |
106 DCHECK(thread_checker_.CalledOnValidThread()); | 109 DCHECK(thread_checker_.CalledOnValidThread()); |
107 int buffer_size = GetBufferSize(sample_rate); | 110 int buffer_size = GetBufferSize(sample_rate); |
108 DVLOG(1) << "Using WebRTC input buffer size: " << buffer_size; | 111 DVLOG(1) << "Using WebRTC input buffer size: " << buffer_size; |
109 | 112 |
110 media::AudioParameters::Format format = | 113 media::AudioParameters::Format format = |
111 media::AudioParameters::AUDIO_PCM_LOW_LATENCY; | 114 media::AudioParameters::AUDIO_PCM_LOW_LATENCY; |
112 | 115 |
113 // bits_per_sample is always 16 for now. | 116 // bits_per_sample is always 16 for now. |
114 int bits_per_sample = 16; | 117 int bits_per_sample = 16; |
115 media::AudioParameters params(format, channel_layout, sample_rate, | 118 media::AudioParameters params(format, channel_layout, sample_rate, |
116 bits_per_sample, buffer_size); | 119 bits_per_sample, buffer_size); |
117 | 120 |
| 121 audio_processing_.reset(new WebRtcAudioProcessingWrapper()); |
| 122 audio_processing_->Configure(params, constraints); |
| 123 sink_params_ = audio_processing_->OutputFormat(); |
| 124 |
118 TrackList tracks; | 125 TrackList tracks; |
119 { | 126 { |
120 base::AutoLock auto_lock(lock_); | 127 base::AutoLock auto_lock(lock_); |
121 tracks = tracks_; | 128 tracks = tracks_; |
122 params_ = params; | 129 source_params_ = params; |
123 } | 130 } |
124 | 131 |
125 // Tell all audio_tracks which format we use. | 132 // Tell all audio_tracks which format we use. |
126 for (TrackList::const_iterator it = tracks.begin(); | 133 for (TrackList::const_iterator it = tracks.begin(); |
127 it != tracks.end(); ++it) | 134 it != tracks.end(); ++it) |
128 (*it)->SetCaptureFormat(params); | 135 (*it)->SetCaptureFormat(sink_params_); |
129 } | 136 } |
130 | 137 |
131 bool WebRtcAudioCapturer::Initialize(int render_view_id, | 138 bool WebRtcAudioCapturer::Initialize( |
132 media::ChannelLayout channel_layout, | 139 int render_view_id, |
133 int sample_rate, | 140 media::ChannelLayout channel_layout, |
134 int buffer_size, | 141 int sample_rate, |
135 int session_id, | 142 int buffer_size, |
136 const std::string& device_id, | 143 int session_id, |
137 int paired_output_sample_rate, | 144 const std::string& device_id, |
138 int paired_output_frames_per_buffer) { | 145 int paired_output_sample_rate, |
| 146 int paired_output_frames_per_buffer, |
| 147 const webrtc::MediaConstraintsInterface* constraints) { |
139 DCHECK(thread_checker_.CalledOnValidThread()); | 148 DCHECK(thread_checker_.CalledOnValidThread()); |
140 DCHECK_GE(render_view_id, 0); | 149 DCHECK_GE(render_view_id, 0); |
141 DVLOG(1) << "WebRtcAudioCapturer::Initialize()"; | 150 DVLOG(1) << "WebRtcAudioCapturer::Initialize()"; |
142 | 151 |
143 DVLOG(1) << "Audio input hardware channel layout: " << channel_layout; | 152 DVLOG(1) << "Audio input hardware channel layout: " << channel_layout; |
144 UMA_HISTOGRAM_ENUMERATION("WebRTC.AudioInputChannelLayout", | 153 UMA_HISTOGRAM_ENUMERATION("WebRTC.AudioInputChannelLayout", |
145 channel_layout, media::CHANNEL_LAYOUT_MAX); | 154 channel_layout, media::CHANNEL_LAYOUT_MAX); |
146 | 155 |
147 render_view_id_ = render_view_id; | 156 render_view_id_ = render_view_id; |
148 session_id_ = session_id; | 157 session_id_ = session_id; |
(...skipping 31 matching lines...) |
180 &kValidInputRates[0] + arraysize(kValidInputRates), | 189 &kValidInputRates[0] + arraysize(kValidInputRates), |
181 sample_rate) == | 190 sample_rate) == |
182 &kValidInputRates[arraysize(kValidInputRates)]) { | 191 &kValidInputRates[arraysize(kValidInputRates)]) { |
183 DLOG(ERROR) << sample_rate << " is not a supported input rate."; | 192 DLOG(ERROR) << sample_rate << " is not a supported input rate."; |
184 return false; | 193 return false; |
185 } | 194 } |
186 | 195 |
187 // Create and configure the default audio capturing source. The |source_| | 196 // Create and configure the default audio capturing source. The |source_| |
188 // will be overwritten if an external client later calls SetCapturerSource() | 197 // will be overwritten if an external client later calls SetCapturerSource() |
189 // providing an alternative media::AudioCapturerSource. | 198 // providing an alternative media::AudioCapturerSource. |
190 SetCapturerSource(AudioDeviceFactory::NewInputDevice(render_view_id), | 199 InitializeCapturerSource(AudioDeviceFactory::NewInputDevice(render_view_id), |
191 channel_layout, | 200 channel_layout, |
192 static_cast<float>(sample_rate)); | 201 static_cast<float>(sample_rate), |
| 202 constraints); |
193 | 203 |
194 return true; | 204 return true; |
195 } | 205 } |
196 | 206 |
197 WebRtcAudioCapturer::WebRtcAudioCapturer() | 207 WebRtcAudioCapturer::WebRtcAudioCapturer() |
198 : source_(NULL), | 208 : source_(NULL), |
199 running_(false), | 209 running_(false), |
200 agc_is_enabled_(false), | 210 agc_is_enabled_(false), |
201 render_view_id_(-1), | 211 render_view_id_(-1), |
202 hardware_buffer_size_(0), | 212 hardware_buffer_size_(0), |
(...skipping 20 matching lines...) |
223 | 233 |
224 // Start the source if the first audio track is connected to the capturer. | 234 // Start the source if the first audio track is connected to the capturer. |
225 // Start() will do nothing if the capturer has already been started. | 235 // Start() will do nothing if the capturer has already been started. |
226 Start(); | 236 Start(); |
227 | 237 |
228 base::AutoLock auto_lock(lock_); | 238 base::AutoLock auto_lock(lock_); |
229 // Verify that |track| is not already added to the list. | 239 // Verify that |track| is not already added to the list. |
230 DCHECK(std::find_if(tracks_.begin(), tracks_.end(), | 240 DCHECK(std::find_if(tracks_.begin(), tracks_.end(), |
231 TrackOwner::TrackWrapper(track)) == tracks_.end()); | 241 TrackOwner::TrackWrapper(track)) == tracks_.end()); |
232 | 242 |
233 track->SetCaptureFormat(params_); | 243 track->SetCaptureFormat(sink_params_); |
234 tracks_.push_back(new WebRtcAudioCapturer::TrackOwner(track)); | 244 tracks_.push_back(new WebRtcAudioCapturer::TrackOwner(track)); |
235 } | 245 } |
236 | 246 |
237 void WebRtcAudioCapturer::RemoveTrack(WebRtcLocalAudioTrack* track) { | 247 void WebRtcAudioCapturer::RemoveTrack(WebRtcLocalAudioTrack* track) { |
238 DCHECK(thread_checker_.CalledOnValidThread()); | 248 DCHECK(thread_checker_.CalledOnValidThread()); |
239 | 249 |
240 bool stop_source = false; | 250 bool stop_source = false; |
241 { | 251 { |
242 base::AutoLock auto_lock(lock_); | 252 base::AutoLock auto_lock(lock_); |
243 // Get iterator to the first element for which WrapsSink(track) returns | 253 // Get iterator to the first element for which WrapsSink(track) returns |
244 // true. | 254 // true. |
245 TrackList::iterator it = std::find_if( | 255 TrackList::iterator it = std::find_if( |
246 tracks_.begin(), tracks_.end(), TrackOwner::TrackWrapper(track)); | 256 tracks_.begin(), tracks_.end(), TrackOwner::TrackWrapper(track)); |
247 if (it != tracks_.end()) { | 257 if (it != tracks_.end()) { |
248 // Clear the delegate to ensure that no more capture callbacks will | 258 // Clear the delegate to ensure that no more capture callbacks will |
249 // be sent to this sink. Also avoids a possible crash which can happen | 259 // be sent to this sink. Also avoids a possible crash which can happen |
250 // if this method is called while capturing is active. | 260 // if this method is called while capturing is active. |
251 (*it)->Reset(); | 261 (*it)->Reset(); |
252 tracks_.erase(it); | 262 tracks_.erase(it); |
253 } | 263 } |
254 | 264 |
255 // Stop the source if the last audio track is going away. | 265 // Stop the source if the last audio track is going away. |
256 stop_source = tracks_.empty(); | 266 stop_source = tracks_.empty(); |
257 } | 267 } |
258 | 268 |
259 if (stop_source) | 269 if (stop_source) |
260 Stop(); | 270 Stop(); |
261 } | 271 } |
262 | 272 |
263 void WebRtcAudioCapturer::SetCapturerSource( | 273 void WebRtcAudioCapturer::InitializeCapturerSource( |
264 const scoped_refptr<media::AudioCapturerSource>& source, | 274 const scoped_refptr<media::AudioCapturerSource>& source, |
265 media::ChannelLayout channel_layout, | 275 media::ChannelLayout channel_layout, |
266 float sample_rate) { | 276 float sample_rate, |
| 277 const webrtc::MediaConstraintsInterface* constraints) { |
267 DCHECK(thread_checker_.CalledOnValidThread()); | 278 DCHECK(thread_checker_.CalledOnValidThread()); |
268 DVLOG(1) << "SetCapturerSource(channel_layout=" << channel_layout << "," | 279 DVLOG(1) << "InitializeCapturerSource(channel_layout=" << channel_layout |
269 << "sample_rate=" << sample_rate << ")"; | 280 << "," << "sample_rate=" << sample_rate << ")"; |
270 scoped_refptr<media::AudioCapturerSource> old_source; | 281 scoped_refptr<media::AudioCapturerSource> old_source; |
271 bool restart_source = false; | 282 bool restart_source = false; |
272 { | 283 { |
273 base::AutoLock auto_lock(lock_); | 284 base::AutoLock auto_lock(lock_); |
274 if (source_.get() == source.get()) | 285 if (source_.get() == source.get()) |
275 return; | 286 return; |
276 | 287 |
277 source_.swap(old_source); | 288 source_.swap(old_source); |
278 source_ = source; | 289 source_ = source; |
279 | 290 |
280 // Reset the flag to allow starting the new source. | 291 // Reset the flag to allow starting the new source. |
281 restart_source = running_; | 292 restart_source = running_; |
282 running_ = false; | 293 running_ = false; |
283 } | 294 } |
284 | 295 |
285 DVLOG(1) << "Switching to a new capture source."; | 296 DVLOG(1) << "Switching to a new capture source."; |
286 if (old_source.get()) | 297 if (old_source.get()) |
287 old_source->Stop(); | 298 old_source->Stop(); |
288 | 299 |
289 // Dispatch the new parameters both to the sink(s) and to the new source. | 300 // Dispatch the new parameters both to the sink(s) and to the new source. |
290 // The idea is to get rid of any dependency of the microphone parameters | 301 // The idea is to get rid of any dependency of the microphone parameters |
291 // which would normally be used by default. | 302 // which would normally be used by default. |
292 Reconfigure(sample_rate, channel_layout); | 303 Reconfigure(sample_rate, channel_layout, constraints); |
293 | 304 |
294 // Make sure to grab the new parameters in case they were reconfigured. | 305 // Make sure to grab the new parameters in case they were reconfigured. |
295 media::AudioParameters params = audio_parameters(); | 306 media::AudioParameters params = audio_parameters(); |
296 source_provider_->Initialize(params); | 307 source_provider_->Initialize(params); |
297 if (source.get()) | 308 if (source.get()) |
298 source->Initialize(params, this, session_id_); | 309 source->Initialize(params, this, session_id_); |
299 | 310 |
300 if (restart_source) | 311 if (restart_source) |
301 Start(); | 312 Start(); |
302 } | 313 } |
(...skipping 17 matching lines...) |
320 render_view_id = render_view_id_; | 331 render_view_id = render_view_id_; |
321 } | 332 } |
322 | 333 |
323 // Do nothing if the current buffer size is the WebRtc native buffer size. | 334 // Do nothing if the current buffer size is the WebRtc native buffer size. |
324 media::AudioParameters params = audio_parameters(); | 335 media::AudioParameters params = audio_parameters(); |
325 if (GetBufferSize(params.sample_rate()) == params.frames_per_buffer()) | 336 if (GetBufferSize(params.sample_rate()) == params.frames_per_buffer()) |
326 return; | 337 return; |
327 | 338 |
328 // Create a new audio stream as source which will open the hardware using | 339 // Create a new audio stream as source which will open the hardware using |
329 // WebRtc native buffer size. | 340 // WebRtc native buffer size. |
330 SetCapturerSource(AudioDeviceFactory::NewInputDevice(render_view_id), | 341 InitializeCapturerSource(AudioDeviceFactory::NewInputDevice(render_view_id), |
331 params.channel_layout(), | 342 params.channel_layout(), |
332 static_cast<float>(params.sample_rate())); | 343 static_cast<float>(params.sample_rate()), |
| 344 NULL); |
333 } | 345 } |
334 | 346 |
| 347 void WebRtcAudioCapturer::FeedRenderDataToAudioProcessing( |
| 348 const int16* render_audio, |
| 349 int sample_rate, |
| 350 int number_of_channels, |
| 351 int number_of_frames, |
| 352 int render_delay_ms) { |
| 353 audio_processing_->FeedRenderDataToAudioProcessing(render_audio, |
| 354 sample_rate, |
| 355 number_of_channels, |
| 356 number_of_frames, |
| 357 render_delay_ms); |
 | 358 } |
 | 359 |
335 void WebRtcAudioCapturer::Start() { | 360 void WebRtcAudioCapturer::Start() { |
336 DVLOG(1) << "WebRtcAudioCapturer::Start()"; | 361 DVLOG(1) << "WebRtcAudioCapturer::Start()"; |
337 base::AutoLock auto_lock(lock_); | 362 base::AutoLock auto_lock(lock_); |
338 if (running_) | 363 if (running_) |
339 return; | 364 return; |
340 | 365 |
341 // Start the data source, i.e., start capturing data from the current source. | 366 // Start the data source, i.e., start capturing data from the current source. |
342 // Note that, the source does not have to be a microphone. | 367 // Note that, the source does not have to be a microphone. |
343 if (source_.get()) { | 368 if (source_.get()) { |
344 // We need to set the AGC control before starting the stream. | 369 // We need to set the AGC control before starting the stream. |
(...skipping 60 matching lines...) |
405 #elif defined(OS_LINUX) || defined(OS_OPENBSD) | 430 #elif defined(OS_LINUX) || defined(OS_OPENBSD) |
406 // We have a special situation on Linux where the microphone volume can be | 431 // We have a special situation on Linux where the microphone volume can be |
407 // "higher than maximum". The input volume slider in the sound preference | 432 // "higher than maximum". The input volume slider in the sound preference |
408 // allows the user to set a scaling that is higher than 100%. It means that | 433 // allows the user to set a scaling that is higher than 100%. It means that |
409 // even if the reported maximum levels is N, the actual microphone level can | 434 // even if the reported maximum levels is N, the actual microphone level can |
410 // go up to 1.5x*N and that corresponds to a normalized |volume| of 1.5x. | 435 // go up to 1.5x*N and that corresponds to a normalized |volume| of 1.5x. |
411 DCHECK_LE(volume, 1.6); | 436 DCHECK_LE(volume, 1.6); |
412 #endif | 437 #endif |
413 | 438 |
414 TrackList tracks; | 439 TrackList tracks; |
415 int current_volume = 0; | |
416 { | 440 { |
417 base::AutoLock auto_lock(lock_); | 441 base::AutoLock auto_lock(lock_); |
418 if (!running_) | 442 if (!running_) |
419 return; | 443 return; |
420 | 444 |
421 // Map internal volume range of [0.0, 1.0] into [0, 255] used by the | 445 // Map internal volume range of [0.0, 1.0] into [0, 255] used by the |
422 // webrtc::VoiceEngine. webrtc::VoiceEngine will handle the case when the | 446 // webrtc::VoiceEngine. webrtc::VoiceEngine will handle the case when the |
423 // volume is higher than 255. | 447 // volume is higher than 255. |
424 volume_ = static_cast<int>((volume * MaxVolume()) + 0.5); | 448 volume_ = static_cast<int>((volume * MaxVolume()) + 0.5); |
425 current_volume = volume_; | |
426 tracks = tracks_; | 449 tracks = tracks_; |
427 } | 450 } |
428 | 451 |
429 // Deliver captured data to source provider, which stores the data into FIFO | 452 audio_processing_->Push(audio_source); |
430 // for WebAudio to fetch. | 453 int current_volume = volume; |
431 source_provider_->DeliverData(audio_source, audio_delay_milliseconds, | 454 while (audio_processing_->ProcessAndConsume10MsData(audio_delay_milliseconds, |
432 current_volume, key_pressed); | 455 current_volume, |
| 456 key_pressed)) { |
| 457 // TODO(xians): Get the new volume and set it to |current_volume|. |
433 | 458 |
434 // Feed the data to the tracks. | 459 for (TrackList::const_iterator it = tracks.begin(); |
435 for (TrackList::const_iterator it = tracks.begin(); | 460 it != tracks.end(); ++it) { |
436 it != tracks.end(); | 461 (*it)->Capture(audio_processing_->OutputBuffer(), |
437 ++it) { | 462 sink_params_.sample_rate(), |
438 (*it)->Capture(audio_source, audio_delay_milliseconds, | 463 sink_params_.channels(), |
439 current_volume, key_pressed); | 464 sink_params_.frames_per_buffer()); |
| 465 } |
| 466 |
 | 467 // TODO(xians): Make the source provider one of the sinks. |
| 468 // Deliver captured data to source provider, which stores the data into FIFO |
| 469 // for WebAudio to fetch. |
| 470 source_provider_->DeliverData(audio_processing_->OutputBuffer(), |
| 471 sink_params_.sample_rate(), |
| 472 sink_params_.channels(), |
| 473 sink_params_.frames_per_buffer()); |
440 } | 474 } |
441 } | 475 } |
442 | 476 |
443 void WebRtcAudioCapturer::OnCaptureError() { | 477 void WebRtcAudioCapturer::OnCaptureError() { |
444 NOTIMPLEMENTED(); | 478 NOTIMPLEMENTED(); |
445 } | 479 } |
446 | 480 |
447 media::AudioParameters WebRtcAudioCapturer::audio_parameters() const { | 481 media::AudioParameters WebRtcAudioCapturer::audio_parameters() const { |
448 base::AutoLock auto_lock(lock_); | 482 base::AutoLock auto_lock(lock_); |
449 return params_; | 483 return source_params_; |
450 } | 484 } |
451 | 485 |
452 bool WebRtcAudioCapturer::GetPairedOutputParameters( | 486 bool WebRtcAudioCapturer::GetPairedOutputParameters( |
453 int* session_id, | 487 int* session_id, |
454 int* output_sample_rate, | 488 int* output_sample_rate, |
455 int* output_frames_per_buffer) const { | 489 int* output_frames_per_buffer) const { |
456 *session_id = session_id_; | 490 *session_id = session_id_; |
457 *output_sample_rate = output_sample_rate_; | 491 *output_sample_rate = output_sample_rate_; |
458 *output_frames_per_buffer = output_frames_per_buffer_; | 492 *output_frames_per_buffer = output_frames_per_buffer_; |
459 return session_id_ > 0 && output_sample_rate_ > 0 && | 493 return session_id_ > 0 && output_sample_rate_ > 0 && |
(...skipping 12 matching lines...) |
472 if (!peer_connection_mode_ && hardware_buffer_size_) | 506 if (!peer_connection_mode_ && hardware_buffer_size_) |
473 return hardware_buffer_size_; | 507 return hardware_buffer_size_; |
474 #endif | 508 #endif |
475 | 509 |
476 // WebRtc is running at a buffer size of 10ms data. Use a multiple of 10ms | 510 // WebRtc is running at a buffer size of 10ms data. Use a multiple of 10ms |
477 // as the buffer size to achieve the best performance for WebRtc. | 511 // as the buffer size to achieve the best performance for WebRtc. |
478 return (sample_rate / 100); | 512 return (sample_rate / 100); |
479 } | 513 } |
480 | 514 |
481 } // namespace content | 515 } // namespace content |
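
The core change in this patch is that Capture() no longer hands the raw hardware buffer straight to the tracks: it pushes each buffer into the audio-processing wrapper and then drains processed audio in fixed 10 ms blocks, fanning each block out to every track and to the source provider. Below is a minimal, self-contained sketch of that push-and-drain pattern. It is an illustration only: the class name TenMsChunker, its members, and the absence of any real echo-cancellation or AGC work are assumptions for this sketch, not the actual WebRtcAudioProcessingWrapper API.

// Sketch only: a FIFO that accepts capture buffers of arbitrary length and
// yields fixed 10 ms blocks, mirroring Push()/ProcessAndConsume10MsData()
// in the patch. Mono int16 audio is assumed; real processing (AEC, AGC,
// noise suppression) would run where the block is copied out.
#include <cstddef>
#include <cstdint>
#include <deque>
#include <vector>

class TenMsChunker {
 public:
  explicit TenMsChunker(int sample_rate)
      : frames_per_10ms_(sample_rate / 100),  // e.g. 480 frames at 48 kHz.
        output_(frames_per_10ms_) {}

  // Queue one capture buffer, whatever size the hardware delivered.
  void Push(const int16_t* data, int number_of_frames) {
    fifo_.insert(fifo_.end(), data, data + number_of_frames);
  }

  // Returns true while a complete 10 ms block is available and copies the
  // next block into OutputBuffer().
  bool ProcessAndConsume10MsData() {
    if (fifo_.size() < static_cast<size_t>(frames_per_10ms_))
      return false;
    output_.assign(fifo_.begin(), fifo_.begin() + frames_per_10ms_);
    fifo_.erase(fifo_.begin(), fifo_.begin() + frames_per_10ms_);
    return true;
  }

  const int16_t* OutputBuffer() const { return output_.data(); }
  int frames_per_buffer() const { return frames_per_10ms_; }

 private:
  const int frames_per_10ms_;
  std::deque<int16_t> fifo_;
  std::vector<int16_t> output_;
};

A capture callback would then look roughly like: chunker.Push(samples, frames); followed by while (chunker.ProcessAndConsume10MsData()) { deliver chunker.OutputBuffer() to each sink; } — the same shape as the loop added to WebRtcAudioCapturer::Capture() above, where each sink receives data in the sink_params_ format rather than the hardware format.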