Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(15)

Side by Side Diff: content/renderer/media/webrtc_audio_renderer.cc

Issue 1666363005: Switching audio clients to using RestartableAudioRendererSink interface as a sink. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: export fix Created 4 years, 10 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "content/renderer/media/webrtc_audio_renderer.h" 5 #include "content/renderer/media/webrtc_audio_renderer.h"
6 6
7 #include <utility> 7 #include <utility>
8 8
9 #include "base/logging.h" 9 #include "base/logging.h"
10 #include "base/metrics/histogram.h" 10 #include "base/metrics/histogram.h"
11 #include "base/strings/string_util.h" 11 #include "base/strings/string_util.h"
12 #include "base/strings/stringprintf.h" 12 #include "base/strings/stringprintf.h"
13 #include "build/build_config.h" 13 #include "build/build_config.h"
14 #include "content/renderer/media/audio_device_factory.h" 14 #include "content/renderer/media/audio_device_factory.h"
15 #include "content/renderer/media/media_stream_audio_track.h" 15 #include "content/renderer/media/media_stream_audio_track.h"
16 #include "content/renderer/media/media_stream_dispatcher.h" 16 #include "content/renderer/media/media_stream_dispatcher.h"
17 #include "content/renderer/media/media_stream_track.h" 17 #include "content/renderer/media/media_stream_track.h"
18 #include "content/renderer/media/webrtc_audio_device_impl.h" 18 #include "content/renderer/media/webrtc_audio_device_impl.h"
19 #include "content/renderer/media/webrtc_logging.h" 19 #include "content/renderer/media/webrtc_logging.h"
20 #include "content/renderer/render_frame_impl.h" 20 #include "content/renderer/render_frame_impl.h"
21 #include "media/audio/audio_output_device.h"
22 #include "media/audio/audio_parameters.h" 21 #include "media/audio/audio_parameters.h"
23 #include "media/audio/sample_rates.h" 22 #include "media/audio/sample_rates.h"
24 #include "third_party/WebKit/public/platform/WebMediaStreamTrack.h" 23 #include "third_party/WebKit/public/platform/WebMediaStreamTrack.h"
25 #include "third_party/libjingle/source/talk/app/webrtc/mediastreaminterface.h" 24 #include "third_party/libjingle/source/talk/app/webrtc/mediastreaminterface.h"
26 #include "third_party/webrtc/media/base/audiorenderer.h" 25 #include "third_party/webrtc/media/base/audiorenderer.h"
27 26
28 #if defined(OS_WIN) 27 #if defined(OS_WIN)
29 #include "base/win/windows_version.h" 28 #include "base/win/windows_version.h"
30 #include "media/audio/win/core_audio_util_win.h" 29 #include "media/audio/win/core_audio_util_win.h"
31 #endif 30 #endif
(...skipping 182 matching lines...) Expand 10 before | Expand all | Expand 10 after
214 DCHECK(thread_checker_.CalledOnValidThread()); 213 DCHECK(thread_checker_.CalledOnValidThread());
215 DCHECK(source); 214 DCHECK(source);
216 DCHECK(!sink_.get()); 215 DCHECK(!sink_.get());
217 DCHECK_GE(session_id_, 0); 216 DCHECK_GE(session_id_, 0);
218 { 217 {
219 base::AutoLock auto_lock(lock_); 218 base::AutoLock auto_lock(lock_);
220 DCHECK_EQ(state_, UNINITIALIZED); 219 DCHECK_EQ(state_, UNINITIALIZED);
221 DCHECK(!source_); 220 DCHECK(!source_);
222 } 221 }
223 222
224 sink_ = 223 sink_ = AudioDeviceFactory::NewOutputDevice(
225 AudioDeviceFactory::NewOutputDevice(source_render_frame_id_, session_id_, 224 AudioDeviceFactory::kSourceWebRTC, source_render_frame_id_, session_id_,
226 output_device_id_, security_origin_); 225 output_device_id_, security_origin_);
227 if (sink_->GetDeviceStatus() != media::OUTPUT_DEVICE_STATUS_OK) 226
227 if (sink_->GetOutputDevice()->GetDeviceStatus() !=
228 media::OUTPUT_DEVICE_STATUS_OK)
DaleCurtis 2016/02/10 23:25:33 Add {} around multi-line conditionals.
o1ka 2016/02/11 17:18:23 Done.
228 return false; 229 return false;
229 230
230 PrepareSink(); 231 PrepareSink();
231 { 232 {
232 // No need to reassert the preconditions because the other thread accessing 233 // No need to reassert the preconditions because the other thread accessing
233 // the fields (checked by |audio_renderer_thread_checker_|) only reads them. 234 // the fields (checked by |audio_renderer_thread_checker_|) only reads them.
234 base::AutoLock auto_lock(lock_); 235 base::AutoLock auto_lock(lock_);
235 source_ = source; 236 source_ = source;
236 237
237 // User must call Play() before any audio can be heard. 238 // User must call Play() before any audio can be heard.
238 state_ = PAUSED; 239 state_ = PAUSED;
239 } 240 }
240 sink_->Start(); 241 sink_->Start();
242 sink_->Play(); // Not all the sinks play on start.
241 243
242 return true; 244 return true;
243 } 245 }
244 246
245 scoped_refptr<MediaStreamAudioRenderer> 247 scoped_refptr<MediaStreamAudioRenderer>
246 WebRtcAudioRenderer::CreateSharedAudioRendererProxy( 248 WebRtcAudioRenderer::CreateSharedAudioRendererProxy(
247 const blink::WebMediaStream& media_stream) { 249 const blink::WebMediaStream& media_stream) {
248 content::SharedAudioRenderer::OnPlayStateChanged on_play_state_changed = 250 content::SharedAudioRenderer::OnPlayStateChanged on_play_state_changed =
249 base::Bind(&WebRtcAudioRenderer::OnPlayStateChanged, this); 251 base::Bind(&WebRtcAudioRenderer::OnPlayStateChanged, this);
250 return new SharedAudioRenderer(this, media_stream, on_play_state_changed); 252 return new SharedAudioRenderer(this, media_stream, on_play_state_changed);
(...skipping 122 matching lines...) Expand 10 before | Expand all | Expand 10 after
373 const media::SwitchOutputDeviceCB& callback) { 375 const media::SwitchOutputDeviceCB& callback) {
374 DVLOG(1) << "WebRtcAudioRenderer::SwitchOutputDevice()"; 376 DVLOG(1) << "WebRtcAudioRenderer::SwitchOutputDevice()";
375 DCHECK(thread_checker_.CalledOnValidThread()); 377 DCHECK(thread_checker_.CalledOnValidThread());
376 DCHECK_GE(session_id_, 0); 378 DCHECK_GE(session_id_, 0);
377 { 379 {
378 base::AutoLock auto_lock(lock_); 380 base::AutoLock auto_lock(lock_);
379 DCHECK(source_); 381 DCHECK(source_);
380 DCHECK_NE(state_, UNINITIALIZED); 382 DCHECK_NE(state_, UNINITIALIZED);
381 } 383 }
382 384
383 scoped_refptr<media::AudioOutputDevice> new_sink = 385 scoped_refptr<media::AudioRendererSink> new_sink =
384 AudioDeviceFactory::NewOutputDevice(source_render_frame_id_, session_id_, 386 AudioDeviceFactory::NewOutputDevice(AudioDeviceFactory::kSourceWebRTC,
387 source_render_frame_id_, session_id_,
385 device_id, security_origin); 388 device_id, security_origin);
386 if (new_sink->GetDeviceStatus() != media::OUTPUT_DEVICE_STATUS_OK) { 389 if (new_sink->GetOutputDevice()->GetDeviceStatus() !=
387 callback.Run(new_sink->GetDeviceStatus()); 390 media::OUTPUT_DEVICE_STATUS_OK) {
391 callback.Run(new_sink->GetOutputDevice()->GetDeviceStatus());
388 return; 392 return;
389 } 393 }
390 394
391 // Make sure to stop the sink while _not_ holding the lock since the Render() 395 // Make sure to stop the sink while _not_ holding the lock since the Render()
392 // callback may currently be executing and trying to grab the lock while we're 396 // callback may currently be executing and trying to grab the lock while we're
393 // stopping the thread on which it runs. 397 // stopping the thread on which it runs.
394 sink_->Stop(); 398 sink_->Stop();
395 audio_renderer_thread_checker_.DetachFromThread(); 399 audio_renderer_thread_checker_.DetachFromThread();
396 sink_ = new_sink; 400 sink_ = new_sink;
397 output_device_id_ = device_id; 401 output_device_id_ = device_id;
398 security_origin_ = security_origin; 402 security_origin_ = security_origin;
399 { 403 {
400 base::AutoLock auto_lock(lock_); 404 base::AutoLock auto_lock(lock_);
401 source_->AudioRendererThreadStopped(); 405 source_->AudioRendererThreadStopped();
402 } 406 }
403 PrepareSink(); 407 PrepareSink();
404 sink_->Start(); 408 sink_->Start();
405 409
406 callback.Run(media::OUTPUT_DEVICE_STATUS_OK); 410 callback.Run(media::OUTPUT_DEVICE_STATUS_OK);
407 } 411 }
408 412
409 media::AudioParameters WebRtcAudioRenderer::GetOutputParameters() { 413 media::AudioParameters WebRtcAudioRenderer::GetOutputParameters() {
410 DCHECK(thread_checker_.CalledOnValidThread()); 414 DCHECK(thread_checker_.CalledOnValidThread());
411 if (!sink_.get()) 415 if (!sink_.get())
412 return media::AudioParameters(); 416 return media::AudioParameters();
413 417
414 return sink_->GetOutputParameters(); 418 return sink_->GetOutputDevice()->GetOutputParameters();
415 } 419 }
416 420
417 media::OutputDeviceStatus WebRtcAudioRenderer::GetDeviceStatus() { 421 media::OutputDeviceStatus WebRtcAudioRenderer::GetDeviceStatus() {
418 DCHECK(thread_checker_.CalledOnValidThread()); 422 DCHECK(thread_checker_.CalledOnValidThread());
419 if (!sink_.get()) 423 if (!sink_.get())
420 return media::OUTPUT_DEVICE_STATUS_ERROR_INTERNAL; 424 return media::OUTPUT_DEVICE_STATUS_ERROR_INTERNAL;
421 425
422 return sink_->GetDeviceStatus(); 426 return sink_->GetOutputDevice()->GetDeviceStatus();
423 } 427 }
424 428
425 int WebRtcAudioRenderer::Render(media::AudioBus* audio_bus, 429 int WebRtcAudioRenderer::Render(media::AudioBus* audio_bus,
426 uint32_t audio_delay_milliseconds, 430 uint32_t audio_delay_milliseconds,
427 uint32_t frames_skipped) { 431 uint32_t frames_skipped) {
428 DCHECK(audio_renderer_thread_checker_.CalledOnValidThread()); 432 DCHECK(audio_renderer_thread_checker_.CalledOnValidThread());
429 base::AutoLock auto_lock(lock_); 433 base::AutoLock auto_lock(lock_);
430 if (!source_) 434 if (!source_)
431 return 0; 435 return 0;
432 436
(...skipping 178 matching lines...) Expand 10 before | Expand all | Expand 10 after
611 { 615 {
612 base::AutoLock lock(lock_); 616 base::AutoLock lock(lock_);
613 new_sink_params = sink_params_; 617 new_sink_params = sink_params_;
614 } 618 }
615 // WebRTC does not yet support higher rates than 96000 on the client side 619 // WebRTC does not yet support higher rates than 96000 on the client side
616 // and 48000 is the preferred sample rate. Therefore, if 192000 is detected, 620 // and 48000 is the preferred sample rate. Therefore, if 192000 is detected,
617 // we change the rate to 48000 instead. The consequence is that the native 621 // we change the rate to 48000 instead. The consequence is that the native
618 // layer will be opened up at 192kHz but WebRTC will provide data at 48kHz 622 // layer will be opened up at 192kHz but WebRTC will provide data at 48kHz
 619 // which will then be resampled by the audio converter on the browser side 623 // which will then be resampled by the audio converter on the browser side
620 // to match the native audio layer. 624 // to match the native audio layer.
621 int sample_rate = sink_->GetOutputParameters().sample_rate(); 625 int sample_rate =
626 sink_->GetOutputDevice()->GetOutputParameters().sample_rate();
622 DVLOG(1) << "Audio output hardware sample rate: " << sample_rate; 627 DVLOG(1) << "Audio output hardware sample rate: " << sample_rate;
623 if (sample_rate >= 192000) { 628 if (sample_rate >= 192000) {
624 DVLOG(1) << "Resampling from 48000 to " << sample_rate << " is required"; 629 DVLOG(1) << "Resampling from 48000 to " << sample_rate << " is required";
625 sample_rate = 48000; 630 sample_rate = 48000;
626 } 631 }
627 media::AudioSampleRate asr; 632 media::AudioSampleRate asr;
628 if (media::ToAudioSampleRate(sample_rate, &asr)) { 633 if (media::ToAudioSampleRate(sample_rate, &asr)) {
629 UMA_HISTOGRAM_ENUMERATION("WebRTC.AudioOutputSampleRate", asr, 634 UMA_HISTOGRAM_ENUMERATION("WebRTC.AudioOutputSampleRate", asr,
630 media::kAudioSampleRateMax + 1); 635 media::kAudioSampleRateMax + 1);
631 } else { 636 } else {
632 UMA_HISTOGRAM_COUNTS("WebRTC.AudioOutputSampleRateUnexpected", sample_rate); 637 UMA_HISTOGRAM_COUNTS("WebRTC.AudioOutputSampleRateUnexpected", sample_rate);
633 } 638 }
634 639
635 // Calculate the frames per buffer for the source, i.e. the WebRTC client. We 640 // Calculate the frames per buffer for the source, i.e. the WebRTC client. We
636 // use 10 ms of data since the WebRTC client only supports multiples of 10 ms 641 // use 10 ms of data since the WebRTC client only supports multiples of 10 ms
637 // as buffer size where 10 ms is preferred for lowest possible delay. 642 // as buffer size where 10 ms is preferred for lowest possible delay.
638 const int source_frames_per_buffer = (sample_rate / 100); 643 const int source_frames_per_buffer = (sample_rate / 100);
639 DVLOG(1) << "Using WebRTC output buffer size: " << source_frames_per_buffer; 644 DVLOG(1) << "Using WebRTC output buffer size: " << source_frames_per_buffer;
640 645
641 // Setup sink parameters. 646 // Setup sink parameters.
642 const int sink_frames_per_buffer = GetOptimalBufferSize( 647 const int sink_frames_per_buffer = GetOptimalBufferSize(
643 sample_rate, sink_->GetOutputParameters().frames_per_buffer()); 648 sample_rate,
649 sink_->GetOutputDevice()->GetOutputParameters().frames_per_buffer());
644 new_sink_params.set_sample_rate(sample_rate); 650 new_sink_params.set_sample_rate(sample_rate);
645 new_sink_params.set_frames_per_buffer(sink_frames_per_buffer); 651 new_sink_params.set_frames_per_buffer(sink_frames_per_buffer);
646 652
647 // Create a FIFO if re-buffering is required to match the source input with 653 // Create a FIFO if re-buffering is required to match the source input with
648 // the sink request. The source acts as provider here and the sink as 654 // the sink request. The source acts as provider here and the sink as
649 // consumer. 655 // consumer.
650 const bool different_source_sink_frames = 656 const bool different_source_sink_frames =
651 source_frames_per_buffer != new_sink_params.frames_per_buffer(); 657 source_frames_per_buffer != new_sink_params.frames_per_buffer();
652 if (different_source_sink_frames) { 658 if (different_source_sink_frames) {
653 DVLOG(1) << "Rebuffering from " << source_frames_per_buffer << " to " 659 DVLOG(1) << "Rebuffering from " << source_frames_per_buffer << " to "
654 << new_sink_params.frames_per_buffer(); 660 << new_sink_params.frames_per_buffer();
655 } 661 }
656 { 662 {
657 base::AutoLock lock(lock_); 663 base::AutoLock lock(lock_);
658 if ((!audio_fifo_ && different_source_sink_frames) || 664 if ((!audio_fifo_ && different_source_sink_frames) ||
659 (audio_fifo_ && 665 (audio_fifo_ &&
660 audio_fifo_->SizeInFrames() != source_frames_per_buffer)) { 666 audio_fifo_->SizeInFrames() != source_frames_per_buffer)) {
661 audio_fifo_.reset(new media::AudioPullFifo( 667 audio_fifo_.reset(new media::AudioPullFifo(
662 kChannels, source_frames_per_buffer, 668 kChannels, source_frames_per_buffer,
663 base::Bind(&WebRtcAudioRenderer::SourceCallback, 669 base::Bind(&WebRtcAudioRenderer::SourceCallback,
664 base::Unretained(this)))); 670 base::Unretained(this))));
665 } 671 }
666 sink_params_ = new_sink_params; 672 sink_params_ = new_sink_params;
667 } 673 }
668 674
669 sink_->Initialize(new_sink_params, this); 675 sink_->Initialize(new_sink_params, this);
670 } 676 }
671 677
672 } // namespace content 678 } // namespace content
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698