Chromium Code Reviews
Diff: content/renderer/media/webrtc_audio_renderer.cc

Issue 1809093003: Moving SwitchOutputDevice out of OutputDevice interface, eliminating OutputDevice (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Replace RestartableAudioRendererSink with SwitchableAudioRendererSink in webmediaplayer_impl unit t… Created 4 years, 9 months ago
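
The refactoring itself is an interface move rather than a behavior change: SwitchOutputDevice leaves the OutputDevice accessor object and becomes a method on the sink itself. A minimal, hypothetical sketch of the before/after caller shape, assuming (from the issue and patch-set titles) a SwitchableAudioRendererSink-style end state; the stand-in types below are simplified and are not the real media/ declarations:

#include <functional>
#include <string>

// Simplified stand-ins (assumptions for illustration, not the real media/ types).
enum class OutputDeviceStatus { kOk, kErrorInternal };
using SwitchOutputDeviceCB = std::function<void(OutputDeviceStatus)>;

// Old shape: switching is reached through a separate OutputDevice object,
// which the sink may or may not be able to provide.
class OutputDevice {
 public:
  virtual ~OutputDevice() = default;
  virtual void SwitchOutputDevice(const std::string& device_id,
                                  const SwitchOutputDeviceCB& callback) = 0;
};
class OldStyleSink {
 public:
  virtual ~OldStyleSink() = default;
  virtual OutputDevice* GetOutputDevice() = 0;  // may return null
};

// New shape: the sink exposes SwitchOutputDevice directly, so the
// OutputDevice indirection disappears from this call path.
class SwitchableSink {
 public:
  virtual ~SwitchableSink() = default;
  virtual void SwitchOutputDevice(const std::string& device_id,
                                  const SwitchOutputDeviceCB& callback) = 0;
};

void SwitchBefore(OldStyleSink* sink, const std::string& id,
                  const SwitchOutputDeviceCB& cb) {
  sink->GetOutputDevice()->SwitchOutputDevice(id, cb);  // null-unsafe
}

void SwitchAfter(SwitchableSink* sink, const std::string& id,
                 const SwitchOutputDeviceCB& cb) {
  sink->SwitchOutputDevice(id, cb);
}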
 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "content/renderer/media/webrtc_audio_renderer.h"

 #include <utility>

 #include "base/logging.h"
 #include "base/metrics/histogram.h"
(...skipping 105 matching lines...)
     DCHECK(volume >= 0.0f && volume <= 1.0f);
     playing_state_.set_volume(volume);
     on_play_state_changed_.Run(media_stream_, &playing_state_);
   }

   media::OutputDevice* GetOutputDevice() override {
     DCHECK(thread_checker_.CalledOnValidThread());
     return delegate_->GetOutputDevice();
   }

+  void SwitchOutputDevice(
+      const std::string& device_id,
+      const url::Origin& security_origin,
+      const media::SwitchOutputDeviceCB& callback) override {
+    DCHECK(thread_checker_.CalledOnValidThread());
+    return delegate_->SwitchOutputDevice(device_id, security_origin, callback);
+  }
+
   base::TimeDelta GetCurrentRenderTime() const override {
     DCHECK(thread_checker_.CalledOnValidThread());
     return delegate_->GetCurrentRenderTime();
   }

   bool IsLocalRenderer() const override {
     DCHECK(thread_checker_.CalledOnValidThread());
     return delegate_->IsLocalRenderer();
   }

(...skipping 81 matching lines...)
   {
     base::AutoLock auto_lock(lock_);
     DCHECK_EQ(state_, UNINITIALIZED);
     DCHECK(!source_);
   }

   sink_ = AudioDeviceFactory::NewAudioRendererSink(
       AudioDeviceFactory::kSourceWebRtc, source_render_frame_id_, session_id_,
       output_device_id_, security_origin_);

-  if (sink_->GetOutputDevice()->GetDeviceStatus() !=
-      media::OUTPUT_DEVICE_STATUS_OK) {
+  media::OutputDevice* device = sink_->GetOutputDevice();
+  if (!(device &&
+        (device->GetDeviceStatus() == media::OUTPUT_DEVICE_STATUS_OK))) {
     return false;
   }

   PrepareSink();
   {
     // No need to reassert the preconditions because the other thread accessing
     // the fields (checked by |audio_renderer_thread_checker_|) only reads them.
     base::AutoLock auto_lock(lock_);
     source_ = source;

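The new status check in Initialize() above treats a sink that cannot provide an OutputDevice the same as one whose device reports an error; !(device && status == OK) is just !device || status != OK. A minimal standalone sketch of that guard, using simplified stand-in types rather than the real media/ declarations:

#include <cstdio>

// Simplified stand-ins for media::OutputDeviceStatus / media::OutputDevice.
enum class OutputDeviceStatus { kOk, kErrorNotFound, kErrorInternal };

struct OutputDevice {
  OutputDeviceStatus status;
  OutputDeviceStatus GetDeviceStatus() const { return status; }
};

// Mirrors the new Initialize() logic: usable only if a device exists *and*
// it reports OK.
bool DeviceUsable(const OutputDevice* device) {
  return device && device->GetDeviceStatus() == OutputDeviceStatus::kOk;
}

int main() {
  OutputDevice ok_device{OutputDeviceStatus::kOk};
  OutputDevice bad_device{OutputDeviceStatus::kErrorNotFound};
  std::printf("ok: %d, bad: %d, null: %d\n", DeviceUsable(&ok_device),
              DeviceUsable(&bad_device), DeviceUsable(nullptr));
}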
(...skipping 111 matching lines...)
 void WebRtcAudioRenderer::SetVolume(float volume) {
   DCHECK(thread_checker_.CalledOnValidThread());
   DCHECK(volume >= 0.0f && volume <= 1.0f);

   playing_state_.set_volume(volume);
   OnPlayStateChanged(media_stream_, &playing_state_);
 }

 media::OutputDevice* WebRtcAudioRenderer::GetOutputDevice() {
   DCHECK(thread_checker_.CalledOnValidThread());
-  return this;
+  return sink_ ? sink_->GetOutputDevice() : nullptr;
 }

 base::TimeDelta WebRtcAudioRenderer::GetCurrentRenderTime() const {
   DCHECK(thread_checker_.CalledOnValidThread());
   base::AutoLock auto_lock(lock_);
   return current_time_;
 }

 bool WebRtcAudioRenderer::IsLocalRenderer() const {
   return false;
 }

 void WebRtcAudioRenderer::SwitchOutputDevice(
     const std::string& device_id,
     const url::Origin& security_origin,
     const media::SwitchOutputDeviceCB& callback) {
   DVLOG(1) << "WebRtcAudioRenderer::SwitchOutputDevice()";
   DCHECK(thread_checker_.CalledOnValidThread());
   DCHECK_GE(session_id_, 0);
   {
     base::AutoLock auto_lock(lock_);
     DCHECK(source_);
     DCHECK_NE(state_, UNINITIALIZED);
   }

   scoped_refptr<media::AudioRendererSink> new_sink =
       AudioDeviceFactory::NewAudioRendererSink(
           AudioDeviceFactory::kSourceWebRtc, source_render_frame_id_,
           session_id_, device_id, security_origin);
-  if (new_sink->GetOutputDevice()->GetDeviceStatus() !=
-      media::OUTPUT_DEVICE_STATUS_OK) {
-    callback.Run(new_sink->GetOutputDevice()->GetDeviceStatus());
+  media::OutputDevice* device = new_sink->GetOutputDevice();
+  media::OutputDeviceStatus status =
+      device ? device->GetDeviceStatus()
+             : media::OUTPUT_DEVICE_STATUS_ERROR_INTERNAL;
+  if (status != media::OUTPUT_DEVICE_STATUS_OK) {
+    callback.Run(status);
     return;
   }

   // Make sure to stop the sink while _not_ holding the lock since the Render()
   // callback may currently be executing and trying to grab the lock while we're
   // stopping the thread on which it runs.
   sink_->Stop();
   audio_renderer_thread_checker_.DetachFromThread();
   sink_ = new_sink;
   output_device_id_ = device_id;
   security_origin_ = security_origin;
   {
     base::AutoLock auto_lock(lock_);
     source_->AudioRendererThreadStopped();
   }
   PrepareSink();
   sink_->Start();

   callback.Run(media::OUTPUT_DEVICE_STATUS_OK);
 }

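The comment above about stopping the sink while _not_ holding the lock guards against a join-under-lock deadlock: Render() runs on the sink's audio thread and takes the same lock, and Stop() blocks until that thread has finished. A minimal sketch of the safe ordering with plain std::thread/std::mutex rather than the Chromium primitives:

#include <atomic>
#include <mutex>
#include <thread>

// The "render thread" grabs the same lock as the control thread, so the
// control thread must not hold the lock while it stops (joins) the thread.
class MiniRenderer {
 public:
  void Start() {
    running_ = true;
    render_thread_ = std::thread([this] {
      while (running_) {
        std::lock_guard<std::mutex> lock(lock_);  // what Render() does
        ++frames_rendered_;
      }
    });
  }

  // Safe ordering: join the render thread *without* holding |lock_|, then
  // update shared state under the lock afterwards (mirrors sink_->Stop();
  // ...; { AutoLock; source_->AudioRendererThreadStopped(); }).
  void StopAndSwap() {
    running_ = false;
    render_thread_.join();  // would deadlock if we held lock_ here
    std::lock_guard<std::mutex> lock(lock_);
    frames_rendered_ = 0;   // stand-in for post-stop bookkeeping
  }

 private:
  std::mutex lock_;
  std::thread render_thread_;
  std::atomic<bool> running_{false};
  int frames_rendered_ = 0;
};

int main() {
  MiniRenderer renderer;
  renderer.Start();
  renderer.StopAndSwap();
}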
-media::AudioParameters WebRtcAudioRenderer::GetOutputParameters() {
-  DCHECK(thread_checker_.CalledOnValidThread());
-  if (!sink_.get())
-    return media::AudioParameters();
-
-  return sink_->GetOutputDevice()->GetOutputParameters();
-}
-
-media::OutputDeviceStatus WebRtcAudioRenderer::GetDeviceStatus() {
-  DCHECK(thread_checker_.CalledOnValidThread());
-  if (!sink_.get())
-    return media::OUTPUT_DEVICE_STATUS_ERROR_INTERNAL;
-
-  return sink_->GetOutputDevice()->GetDeviceStatus();
-}
-
 int WebRtcAudioRenderer::Render(media::AudioBus* audio_bus,
                                 uint32_t frames_delayed,
                                 uint32_t frames_skipped) {
   DCHECK(audio_renderer_thread_checker_.CalledOnValidThread());
   base::AutoLock auto_lock(lock_);
   if (!source_)
     return 0;

   // TODO(grunell): Converting from frames to milliseconds will potentially lose
   // hundreds of microseconds which may cause audio video drift. Update
(...skipping 180 matching lines...)
   }
 }

 void WebRtcAudioRenderer::PrepareSink() {
   DCHECK(thread_checker_.CalledOnValidThread());
   media::AudioParameters new_sink_params;
   {
     base::AutoLock lock(lock_);
     new_sink_params = sink_params_;
   }
+
+  media::OutputDevice* device = sink_->GetOutputDevice();
+  DCHECK(device);
+  const media::AudioParameters output_params = device->GetOutputParameters();
+
   // WebRTC does not yet support higher rates than 96000 on the client side
   // and 48000 is the preferred sample rate. Therefore, if 192000 is detected,
   // we change the rate to 48000 instead. The consequence is that the native
   // layer will be opened up at 192kHz but WebRTC will provide data at 48kHz
   // which will then be resampled by the audio converted on the browser side
   // to match the native audio layer.
-  int sample_rate =
-      sink_->GetOutputDevice()->GetOutputParameters().sample_rate();
+  int sample_rate = output_params.sample_rate();
   DVLOG(1) << "Audio output hardware sample rate: " << sample_rate;
   if (sample_rate >= 192000) {
     DVLOG(1) << "Resampling from 48000 to " << sample_rate << " is required";
     sample_rate = 48000;
   }
   media::AudioSampleRate asr;
   if (media::ToAudioSampleRate(sample_rate, &asr)) {
     UMA_HISTOGRAM_ENUMERATION("WebRTC.AudioOutputSampleRate", asr,
                               media::kAudioSampleRateMax + 1);
   } else {
     UMA_HISTOGRAM_COUNTS("WebRTC.AudioOutputSampleRateUnexpected", sample_rate);
   }

   // Calculate the frames per buffer for the source, i.e. the WebRTC client. We
   // use 10 ms of data since the WebRTC client only supports multiples of 10 ms
   // as buffer size where 10 ms is preferred for lowest possible delay.
   const int source_frames_per_buffer = (sample_rate / 100);
   DVLOG(1) << "Using WebRTC output buffer size: " << source_frames_per_buffer;

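source_frames_per_buffer above is simply 10 ms of audio expressed in frames, i.e. sample_rate / 100: 480 frames at 48000 Hz, 441 at 44100 Hz. A tiny sketch of the same arithmetic:

#include <cstdio>

// 10 ms of audio in frames, as in PrepareSink(): sample_rate / 100.
constexpr int FramesPer10Ms(int sample_rate) { return sample_rate / 100; }

int main() {
  const int rates[] = {16000, 44100, 48000, 96000};
  for (int rate : rates)
    std::printf("%d Hz -> %d frames per 10 ms\n", rate, FramesPer10Ms(rate));
  // e.g. 48000 Hz -> 480 frames; 44100 Hz -> 441 frames.
}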
   // Setup sink parameters.
-  const int sink_frames_per_buffer = GetOptimalBufferSize(
-      sample_rate,
-      sink_->GetOutputDevice()->GetOutputParameters().frames_per_buffer());
+  const int sink_frames_per_buffer =
+      GetOptimalBufferSize(sample_rate, output_params.frames_per_buffer());
   new_sink_params.set_sample_rate(sample_rate);
   new_sink_params.set_frames_per_buffer(sink_frames_per_buffer);

   // Create a FIFO if re-buffering is required to match the source input with
   // the sink request. The source acts as provider here and the sink as
   // consumer.
   const bool different_source_sink_frames =
       source_frames_per_buffer != new_sink_params.frames_per_buffer();
   if (different_source_sink_frames) {
     DVLOG(1) << "Rebuffering from " << source_frames_per_buffer << " to "
              << new_sink_params.frames_per_buffer();
   }
   {
     base::AutoLock lock(lock_);
     if ((!audio_fifo_ && different_source_sink_frames) ||
         (audio_fifo_ &&
          audio_fifo_->SizeInFrames() != source_frames_per_buffer)) {
       audio_fifo_.reset(new media::AudioPullFifo(
           kChannels, source_frames_per_buffer,
           base::Bind(&WebRtcAudioRenderer::SourceCallback,
                      base::Unretained(this))));
     }
     sink_params_ = new_sink_params;
   }

   sink_->Initialize(new_sink_params, this);
 }

 }  // namespace content
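
PrepareSink() only builds the AudioPullFifo when the 10 ms source buffer and the sink's preferred buffer size differ; the FIFO then lets the sink pull an arbitrary number of frames per Render() call while the WebRTC source keeps delivering fixed 10 ms blocks. A rough single-channel illustration of that pull-style rebuffering (a simple stand-in, not the real media::AudioPullFifo):

#include <algorithm>
#include <cstdio>
#include <deque>
#include <functional>
#include <vector>

// Pull-style FIFO sketch: the consumer asks for |frames| samples; whenever the
// buffered data runs short, the FIFO pulls another fixed-size block (10 ms in
// PrepareSink()) from the source callback.
class MiniPullFifo {
 public:
  using SourceCallback = std::function<void(std::vector<float>*)>;

  MiniPullFifo(int source_frames_per_buffer, SourceCallback source)
      : source_frames_(source_frames_per_buffer), source_(std::move(source)) {}

  std::vector<float> Consume(int frames) {
    while (static_cast<int>(buffered_.size()) < frames) {
      std::vector<float> block(source_frames_);
      source_(&block);  // stands in for WebRtcAudioRenderer::SourceCallback
      buffered_.insert(buffered_.end(), block.begin(), block.end());
    }
    std::vector<float> out(buffered_.begin(), buffered_.begin() + frames);
    buffered_.erase(buffered_.begin(), buffered_.begin() + frames);
    return out;
  }

 private:
  const int source_frames_;
  SourceCallback source_;
  std::deque<float> buffered_;
};

int main() {
  int pulls = 0;
  // Source produces 480-frame (10 ms at 48 kHz) blocks; the sink asks for 512.
  MiniPullFifo fifo(480, [&pulls](std::vector<float>* block) {
    ++pulls;
    std::fill(block->begin(), block->end(), 0.0f);  // silence for the sketch
  });
  fifo.Consume(512);  // needs two 10 ms pulls to satisfy one 512-frame request
  std::printf("source pulls for one 512-frame render: %d\n", pulls);
}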