OLD | NEW |
---|---|
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "content/renderer/media/webrtc_audio_renderer.h" | 5 #include "content/renderer/media/webrtc_audio_renderer.h" |
6 | 6 |
7 #include <utility> | 7 #include <utility> |
8 | 8 |
9 #include "base/logging.h" | 9 #include "base/logging.h" |
10 #include "base/metrics/histogram.h" | 10 #include "base/metrics/histogram.h" |
(...skipping 21 matching lines...) Expand all Loading... | |
32 | 32 |
33 namespace content { | 33 namespace content { |
34 | 34 |
35 namespace { | 35 namespace { |
36 | 36 |
37 // We add a UMA histogram measuring the execution time of the Render() method | 37 // We add a UMA histogram measuring the execution time of the Render() method |
38 // every |kNumCallbacksBetweenRenderTimeHistograms| callbacks. Assuming 10ms | 38 // every |kNumCallbacksBetweenRenderTimeHistograms| callbacks. Assuming 10ms |
39 // between each callback leads to one UMA update each 100ms. | 39 // between each callback leads to one UMA update each 100ms. |
40 const int kNumCallbacksBetweenRenderTimeHistograms = 10; | 40 const int kNumCallbacksBetweenRenderTimeHistograms = 10; |
41 | 41 |
42 // Audio parameters that don't change. | |
43 const media::AudioParameters::Format kFormat = | |
44 media::AudioParameters::AUDIO_PCM_LOW_LATENCY; | |
45 const media::ChannelLayout kChannelLayout = media::CHANNEL_LAYOUT_STEREO; | |
46 const int kChannels = 2; | |
47 const int kBitsPerSample = 16; | |
48 | |
42 // This is a simple wrapper class that's handed out to users of a shared | 49 // This is a simple wrapper class that's handed out to users of a shared |
43 // WebRtcAudioRenderer instance. This class maintains the per-user 'playing' | 50 // WebRtcAudioRenderer instance. This class maintains the per-user 'playing' |
44 // and 'started' states to avoid problems related to incorrect usage which | 51 // and 'started' states to avoid problems related to incorrect usage which |
45 // might violate the implementation assumptions inside WebRtcAudioRenderer | 52 // might violate the implementation assumptions inside WebRtcAudioRenderer |
46 // (see the play reference count). | 53 // (see the play reference count). |
47 class SharedAudioRenderer : public MediaStreamAudioRenderer { | 54 class SharedAudioRenderer : public MediaStreamAudioRenderer { |
48 public: | 55 public: |
49 // Callback definition for a callback that is called when Play(), Pause() | 56 // Callback definition for a callback that is called when Play(), Pause() |
50 // or SetVolume are called (whenever the internal |playing_state_| changes). | 57 // or SetVolume are called (whenever the internal |playing_state_| changes). |
51 typedef base::Callback<void(const blink::WebMediaStream&, | 58 typedef base::Callback<void(const blink::WebMediaStream&, |
(...skipping 128 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
180 const url::Origin& security_origin) | 187 const url::Origin& security_origin) |
181 : state_(UNINITIALIZED), | 188 : state_(UNINITIALIZED), |
182 source_render_frame_id_(source_render_frame_id), | 189 source_render_frame_id_(source_render_frame_id), |
183 session_id_(session_id), | 190 session_id_(session_id), |
184 signaling_thread_(signaling_thread), | 191 signaling_thread_(signaling_thread), |
185 media_stream_(media_stream), | 192 media_stream_(media_stream), |
186 source_(NULL), | 193 source_(NULL), |
187 play_ref_count_(0), | 194 play_ref_count_(0), |
188 start_ref_count_(0), | 195 start_ref_count_(0), |
189 audio_delay_milliseconds_(0), | 196 audio_delay_milliseconds_(0), |
190 fifo_delay_milliseconds_(0), | 197 sink_params_(kFormat, kChannelLayout, 0, kBitsPerSample, 0), |
191 sink_params_(media::AudioParameters::AUDIO_PCM_LOW_LATENCY, | |
192 media::CHANNEL_LAYOUT_STEREO, | |
193 0, | |
194 16, | |
195 0), | |
196 output_device_id_(device_id), | 198 output_device_id_(device_id), |
197 security_origin_(security_origin), | 199 security_origin_(security_origin), |
198 render_callback_count_(0) { | 200 render_callback_count_(0) { |
199 WebRtcLogMessage(base::StringPrintf( | 201 WebRtcLogMessage(base::StringPrintf( |
200 "WAR::WAR. source_render_frame_id=%d, session_id=%d, effects=%i", | 202 "WAR::WAR. source_render_frame_id=%d, session_id=%d, effects=%i", |
201 source_render_frame_id, session_id, sink_params_.effects())); | 203 source_render_frame_id, session_id, sink_params_.effects())); |
202 audio_renderer_thread_checker_.DetachFromThread(); | 204 audio_renderer_thread_checker_.DetachFromThread(); |
203 } | 205 } |
204 | 206 |
205 WebRtcAudioRenderer::~WebRtcAudioRenderer() { | 207 WebRtcAudioRenderer::~WebRtcAudioRenderer() { |
(...skipping 221 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
427 base::AutoLock auto_lock(lock_); | 429 base::AutoLock auto_lock(lock_); |
428 if (!source_) | 430 if (!source_) |
429 return 0; | 431 return 0; |
430 | 432 |
431 DVLOG(2) << "WebRtcAudioRenderer::Render()"; | 433 DVLOG(2) << "WebRtcAudioRenderer::Render()"; |
432 DVLOG(2) << "audio_delay_milliseconds: " << audio_delay_milliseconds; | 434 DVLOG(2) << "audio_delay_milliseconds: " << audio_delay_milliseconds; |
433 | 435 |
434 DCHECK_LE(audio_delay_milliseconds, static_cast<uint32_t>(INT_MAX)); | 436 DCHECK_LE(audio_delay_milliseconds, static_cast<uint32_t>(INT_MAX)); |
435 audio_delay_milliseconds_ = static_cast<int>(audio_delay_milliseconds); | 437 audio_delay_milliseconds_ = static_cast<int>(audio_delay_milliseconds); |
436 | 438 |
439 // If there are skipped frames, pull and throw away the same amount. We always | |
440 // pull 10 ms of data from the source (see PrepareSink()), so the fifo is only | |
441 // required if the number of frames to drop doesn't correspond to 10 ms. | |
442 if (frames_skipped > 0) { | |
443 const uint32_t source_frames_per_buffer = | |
444 static_cast<uint32_t>(sink_params_.sample_rate() / 100); | |
445 if (!audio_fifo_ && frames_skipped != source_frames_per_buffer) { | |
446 audio_fifo_.reset(new media::AudioPullFifo( | |
447 kChannels, source_frames_per_buffer, | |
448 base::Bind(&WebRtcAudioRenderer::SourceCallback, | |
449 base::Unretained(this)))); | |
450 } | |
451 | |
452 scoped_ptr<media::AudioBus> drop_bus = | |
453 media::AudioBus::Create(audio_bus->channels(), frames_skipped); | |
454 if (audio_fifo_) | |
455 audio_fifo_->Consume(drop_bus.get(), drop_bus->frames()); | |
456 else | |
457 SourceCallback(0, drop_bus.get()); | |
458 } | |
459 | |
460 // Pull the data we will deliver. | |
437 if (audio_fifo_) | 461 if (audio_fifo_) |
438 audio_fifo_->Consume(audio_bus, audio_bus->frames()); | 462 audio_fifo_->Consume(audio_bus, audio_bus->frames()); |
439 else | 463 else |
440 SourceCallback(0, audio_bus); | 464 SourceCallback(0, audio_bus); |
441 | 465 |
442 return (state_ == PLAYING) ? audio_bus->frames() : 0; | 466 return (state_ == PLAYING) ? audio_bus->frames() : 0; |
443 } | 467 } |
444 | 468 |
445 void WebRtcAudioRenderer::OnRenderError() { | 469 void WebRtcAudioRenderer::OnRenderError() { |
446 NOTIMPLEMENTED(); | 470 NOTIMPLEMENTED(); |
447 LOG(ERROR) << "OnRenderError()"; | 471 LOG(ERROR) << "OnRenderError()"; |
448 } | 472 } |
449 | 473 |
450 // Called by AudioPullFifo when more data is necessary. | 474 // Called by AudioPullFifo when more data is necessary. |
451 void WebRtcAudioRenderer::SourceCallback( | 475 void WebRtcAudioRenderer::SourceCallback( |
452 int fifo_frame_delay, media::AudioBus* audio_bus) { | 476 int fifo_frame_delay, media::AudioBus* audio_bus) { |
453 DCHECK(audio_renderer_thread_checker_.CalledOnValidThread()); | 477 DCHECK(audio_renderer_thread_checker_.CalledOnValidThread()); |
454 base::TimeTicks start_time = base::TimeTicks::Now(); | 478 base::TimeTicks start_time = base::TimeTicks::Now(); |
455 DVLOG(2) << "WebRtcAudioRenderer::SourceCallback(" | 479 DVLOG(2) << "WebRtcAudioRenderer::SourceCallback(" |
456 << fifo_frame_delay << ", " | 480 << fifo_frame_delay << ", " |
457 << audio_bus->frames() << ")"; | 481 << audio_bus->frames() << ")"; |
458 | 482 |
459 int output_delay_milliseconds = audio_delay_milliseconds_; | 483 int output_delay_milliseconds = audio_delay_milliseconds_; |
460 output_delay_milliseconds += fifo_delay_milliseconds_; | 484 output_delay_milliseconds += fifo_frame_delay * |
485 base::Time::kMillisecondsPerSecond / | |
486 sink_params_.sample_rate(); | |
461 DVLOG(2) << "output_delay_milliseconds: " << output_delay_milliseconds; | 487 DVLOG(2) << "output_delay_milliseconds: " << output_delay_milliseconds; |
462 | 488 |
463 // We need to keep render data for the |source_| regardless of |state_|, | 489 // We need to keep render data for the |source_| regardless of |state_|, |
464 // otherwise the data will be buffered up inside |source_|. | 490 // otherwise the data will be buffered up inside |source_|. |
465 source_->RenderData(audio_bus, sink_params_.sample_rate(), | 491 source_->RenderData(audio_bus, sink_params_.sample_rate(), |
466 output_delay_milliseconds, | 492 output_delay_milliseconds, |
467 ¤t_time_); | 493 ¤t_time_); |
468 | 494 |
469 // Avoid filling up the audio bus if we are not playing; instead | 495 // Avoid filling up the audio bus if we are not playing; instead |
470 // return here and ensure that the returned value in Render() is 0. | 496 // return here and ensure that the returned value in Render() is 0. |
(...skipping 128 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
599 sample_rate = 48000; | 625 sample_rate = 48000; |
600 } | 626 } |
601 media::AudioSampleRate asr; | 627 media::AudioSampleRate asr; |
602 if (media::ToAudioSampleRate(sample_rate, &asr)) { | 628 if (media::ToAudioSampleRate(sample_rate, &asr)) { |
603 UMA_HISTOGRAM_ENUMERATION("WebRTC.AudioOutputSampleRate", asr, | 629 UMA_HISTOGRAM_ENUMERATION("WebRTC.AudioOutputSampleRate", asr, |
604 media::kAudioSampleRateMax + 1); | 630 media::kAudioSampleRateMax + 1); |
605 } else { | 631 } else { |
606 UMA_HISTOGRAM_COUNTS("WebRTC.AudioOutputSampleRateUnexpected", sample_rate); | 632 UMA_HISTOGRAM_COUNTS("WebRTC.AudioOutputSampleRateUnexpected", sample_rate); |
607 } | 633 } |
608 | 634 |
609 // Set up audio parameters for the source, i.e., the WebRTC client. | 635 // Calculate the frames per buffer for the source, i.e. the WebRTC client. We |
636 // use 10 ms of data since the WebRTC client only supports multiples of 10 ms | |
637 // as buffer size where 10 ms is preferred for lowest possible delay. | |
638 const int source_frames_per_buffer = (sample_rate / 100); | |
639 DVLOG(1) << "Using WebRTC output buffer size: " << source_frames_per_buffer; | |
610 | 640 |
611 // The WebRTC client only supports multiples of 10ms as buffer size where | 641 // Set up sink parameters. |
612 // 10ms is preferred for lowest possible delay. | 642 const int sink_frames_per_buffer = GetOptimalBufferSize( |
613 const int frames_per_10ms = (sample_rate / 100); | |
614 DVLOG(1) << "Using WebRTC output buffer size: " << frames_per_10ms; | |
615 media::AudioParameters source_params( | |
616 media::AudioParameters::AUDIO_PCM_LOW_LATENCY, | |
617 new_sink_params.channel_layout(), sample_rate, 16, frames_per_10ms); | |
618 source_params.set_channels_for_discrete(new_sink_params.channels()); | |
619 | |
620 const int frames_per_buffer = GetOptimalBufferSize( | |
621 sample_rate, sink_->GetOutputParameters().frames_per_buffer()); | 643 sample_rate, sink_->GetOutputParameters().frames_per_buffer()); |
622 | 644 new_sink_params.set_sample_rate(sample_rate); |
623 new_sink_params.Reset( | 645 new_sink_params.set_frames_per_buffer(sink_frames_per_buffer); |
624 new_sink_params.format(), new_sink_params.channel_layout(), | |
625 sample_rate, 16, frames_per_buffer); | |
626 | 646 |
627 // Create a FIFO if re-buffering is required to match the source input with | 647 // Create a FIFO if re-buffering is required to match the source input with |
628 // the sink request. The source acts as provider here and the sink as | 648 // the sink request. The source acts as provider here and the sink as |
629 // consumer. | 649 // consumer. |
630 int new_fifo_delay_milliseconds = 0; | 650 const bool different_source_sink_frames = |
631 scoped_ptr<media::AudioPullFifo> new_audio_fifo; | 651 source_frames_per_buffer != new_sink_params.frames_per_buffer(); |
632 if (source_params.frames_per_buffer() != | 652 if (different_source_sink_frames) { |
633 new_sink_params.frames_per_buffer()) { | 653 DVLOG(1) << "Rebuffering from " << source_frames_per_buffer << " to " |
634 DVLOG(1) << "Rebuffering from " << source_params.frames_per_buffer() | 654 << new_sink_params.frames_per_buffer(); |
635 << " to " << new_sink_params.frames_per_buffer(); | |
636 new_audio_fifo.reset(new media::AudioPullFifo( | |
637 source_params.channels(), source_params.frames_per_buffer(), | |
638 base::Bind(&WebRtcAudioRenderer::SourceCallback, | |
639 base::Unretained(this)))); | |
640 | |
641 if (new_sink_params.frames_per_buffer() > | |
642 source_params.frames_per_buffer()) { | |
643 int frame_duration_milliseconds = | |
644 base::Time::kMillisecondsPerSecond / | |
645 static_cast<double>(source_params.sample_rate()); | |
646 new_fifo_delay_milliseconds = (new_sink_params.frames_per_buffer() - | |
647 source_params.frames_per_buffer()) * | |
648 frame_duration_milliseconds; | |
649 } | |
650 } | 655 } |
651 | |
652 { | 656 { |
653 base::AutoLock lock(lock_); | 657 base::AutoLock lock(lock_); |
658 if ((!audio_fifo_ && different_source_sink_frames) || | |
659 (audio_fifo_ && | |
o1ka
2016/01/22 13:15:03
So in certain cases an existing fifo is reset. Doe
Henrik Grunell
2016/01/25 11:56:23
[We talked about this offline, summarizing here.]
| |
660 audio_fifo_->SizeInFrames() != source_frames_per_buffer)) { | |
661 audio_fifo_.reset(new media::AudioPullFifo( | |
662 kChannels, source_frames_per_buffer, | |
663 base::Bind(&WebRtcAudioRenderer::SourceCallback, | |
664 base::Unretained(this)))); | |
665 } | |
654 sink_params_ = new_sink_params; | 666 sink_params_ = new_sink_params; |
655 fifo_delay_milliseconds_ = new_fifo_delay_milliseconds; | |
656 if (new_audio_fifo.get()) | |
657 audio_fifo_ = std::move(new_audio_fifo); | |
658 } | 667 } |
659 | 668 |
660 sink_->Initialize(new_sink_params, this); | 669 sink_->Initialize(new_sink_params, this); |
661 } | 670 } |
662 | 671 |
663 } // namespace content | 672 } // namespace content |
OLD | NEW |