| OLD | NEW |
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "content/renderer/media/webrtc_audio_renderer.h" | 5 #include "content/renderer/media/webrtc_audio_renderer.h" |
| 6 | 6 |
| 7 #include <utility> | 7 #include <utility> |
| 8 | 8 |
| 9 #include "base/bind.h" | 9 #include "base/bind.h" |
| 10 #include "base/bind_helpers.h" | 10 #include "base/bind_helpers.h" |
| 11 #include "base/location.h" | 11 #include "base/location.h" |
| 12 #include "base/logging.h" | 12 #include "base/logging.h" |
| 13 #include "base/metrics/histogram.h" | 13 #include "base/metrics/histogram.h" |
| 14 #include "base/strings/string_util.h" | 14 #include "base/strings/string_util.h" |
| 15 #include "base/strings/stringprintf.h" | 15 #include "base/strings/stringprintf.h" |
| 16 #include "build/build_config.h" | 16 #include "build/build_config.h" |
| 17 #include "content/renderer/media/audio_device_factory.h" | 17 #include "content/renderer/media/audio_device_factory.h" |
| 18 #include "content/renderer/media/media_stream_audio_track.h" | 18 #include "content/renderer/media/media_stream_audio_track.h" |
| 19 #include "content/renderer/media/webrtc/peer_connection_remote_audio_source.h" | 19 #include "content/renderer/media/webrtc/peer_connection_remote_audio_source.h" |
| 20 #include "content/renderer/media/webrtc_logging.h" | 20 #include "content/renderer/media/webrtc_logging.h" |
| 21 #include "media/audio/sample_rates.h" | 21 #include "media/audio/sample_rates.h" |
| 22 #include "media/base/audio_capturer_source.h" | 22 #include "media/base/audio_capturer_source.h" |
| 23 #include "media/base/audio_latency.h" |
| 23 #include "media/base/audio_parameters.h" | 24 #include "media/base/audio_parameters.h" |
| 24 #include "third_party/WebKit/public/platform/WebMediaStreamTrack.h" | 25 #include "third_party/WebKit/public/platform/WebMediaStreamTrack.h" |
| 25 #include "third_party/webrtc/api/mediastreaminterface.h" | 26 #include "third_party/webrtc/api/mediastreaminterface.h" |
| 26 | 27 |
| 27 #if defined(OS_WIN) | 28 #if defined(OS_WIN) |
| 28 #include "base/win/windows_version.h" | 29 #include "base/win/windows_version.h" |
| 29 #include "media/audio/win/core_audio_util_win.h" | 30 #include "media/audio/win/core_audio_util_win.h" |
| 30 #endif | 31 #endif |
| 31 | 32 |
| 32 namespace content { | 33 namespace content { |
| (...skipping 112 matching lines...) |
| 145 base::ThreadChecker thread_checker_; | 146 base::ThreadChecker thread_checker_; |
| 146 const scoped_refptr<MediaStreamAudioRenderer> delegate_; | 147 const scoped_refptr<MediaStreamAudioRenderer> delegate_; |
| 147 const blink::WebMediaStream media_stream_; | 148 const blink::WebMediaStream media_stream_; |
| 148 bool started_; | 149 bool started_; |
| 149 WebRtcAudioRenderer::PlayingState playing_state_; | 150 WebRtcAudioRenderer::PlayingState playing_state_; |
| 150 OnPlayStateChanged on_play_state_changed_; | 151 OnPlayStateChanged on_play_state_changed_; |
| 151 }; | 152 }; |
| 152 | 153 |
| 153 } // namespace | 154 } // namespace |
| 154 | 155 |
| 155 int WebRtcAudioRenderer::GetOptimalBufferSize(int sample_rate, | |
| 156 int hardware_buffer_size) { | |
| 157 // Use the native hardware buffer size as the default. On Windows, we strive | |
| 158 // to open the stream at this native buffer size to achieve the best | |
| 159 // possible performance and to ensure that no FIFO is needed on the browser | |
| 160 // side to match the client request. That is why there is no #if case for | |
| 161 // Windows below. | |
| 162 int frames_per_buffer = hardware_buffer_size; | |
| 163 | |
| 164 #if defined(OS_LINUX) || defined(OS_MACOSX) | |
| 165 // On Linux and MacOS, the low level IO implementations on the browser side | |
| 166 // support all buffer sizes the clients want. We use the native peer | |
| 167 // connection buffer size (10ms) to achieve best possible performance. | |
| 168 frames_per_buffer = sample_rate / 100; | |
| 169 #elif defined(OS_ANDROID) | |
| 170 // TODO(henrika): Keep tuning this scheme, especially for low-latency | |
| 171 // cases. Might not be possible to come up with the perfect solution using | |
| 172 // the render side only. | |
| 173 int frames_per_10ms = sample_rate / 100; | |
| 174 if (frames_per_buffer < 2 * frames_per_10ms) { | |
| 175 // Examples of low-latency frame sizes and the resulting |buffer_size|: | |
| 176 // Nexus 7 : 240 audio frames => 2*480 = 960 | |
| 177 // Nexus 10 : 256 => 2*441 = 882 | |
| 178 // Galaxy Nexus: 144 => 2*441 = 882 | |
| 179 frames_per_buffer = 2 * frames_per_10ms; | |
| 180 DVLOG(1) << "Low-latency output detected on Android"; | |
| 181 } | |
| 182 #endif | |
| 183 | |
| 184 DVLOG(1) << "Using sink output buffer size: " << frames_per_buffer; | |
| 185 return frames_per_buffer; | |
| 186 } | |
| 187 | |
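For a concrete feel for the Android branch of the removed helper above, here is a minimal worked example, assuming a 48 kHz device that reports 240-frame buffers (the Nexus 7 row in the comment): the sink buffer is raised to two 10 ms blocks, i.e. 960 frames. Per this change, the production logic now lives in media::AudioLatency::GetRtcBufferSize(), used further down in the diff.

```cpp
// Standalone illustration of the low-latency Android clamp in the removed
// GetOptimalBufferSize(): a device buffer below two 10 ms blocks is raised
// to exactly two 10 ms blocks. Values are taken from the comment above;
// the main() framing is for illustration only.
#include <cassert>

int main() {
  const int sample_rate = 48000;                  // Nexus 7-style device.
  const int hardware_buffer_size = 240;           // Reported low-latency buffer.
  const int frames_per_10ms = sample_rate / 100;  // 480 frames.

  int frames_per_buffer = hardware_buffer_size;
  if (frames_per_buffer < 2 * frames_per_10ms)
    frames_per_buffer = 2 * frames_per_10ms;      // 2 * 480 = 960 frames.

  assert(frames_per_buffer == 960);
  return 0;
}
```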
| 188 WebRtcAudioRenderer::WebRtcAudioRenderer( | 156 WebRtcAudioRenderer::WebRtcAudioRenderer( |
| 189 const scoped_refptr<base::SingleThreadTaskRunner>& signaling_thread, | 157 const scoped_refptr<base::SingleThreadTaskRunner>& signaling_thread, |
| 190 const blink::WebMediaStream& media_stream, | 158 const blink::WebMediaStream& media_stream, |
| 191 int source_render_frame_id, | 159 int source_render_frame_id, |
| 192 int session_id, | 160 int session_id, |
| 193 const std::string& device_id, | 161 const std::string& device_id, |
| 194 const url::Origin& security_origin) | 162 const url::Origin& security_origin) |
| 195 : state_(UNINITIALIZED), | 163 : state_(UNINITIALIZED), |
| 196 source_render_frame_id_(source_render_frame_id), | 164 source_render_frame_id_(source_render_frame_id), |
| 197 session_id_(session_id), | 165 session_id_(session_id), |
| (...skipping 450 matching lines...) |
| 648 UMA_HISTOGRAM_COUNTS("WebRTC.AudioOutputSampleRateUnexpected", sample_rate); | 616 UMA_HISTOGRAM_COUNTS("WebRTC.AudioOutputSampleRateUnexpected", sample_rate); |
| 649 } | 617 } |
| 650 | 618 |
| 651 // Calculate the frames per buffer for the source, i.e. the WebRTC client. We | 619 // Calculate the frames per buffer for the source, i.e. the WebRTC client. We |
| 652 // use 10 ms of data since the WebRTC client only supports multiples of 10 ms | 620 // use 10 ms of data since the WebRTC client only supports multiples of 10 ms |
| 653 // as buffer size where 10 ms is preferred for lowest possible delay. | 621 // as buffer size where 10 ms is preferred for lowest possible delay. |
| 654 const int source_frames_per_buffer = (sample_rate / 100); | 622 const int source_frames_per_buffer = (sample_rate / 100); |
| 655 DVLOG(1) << "Using WebRTC output buffer size: " << source_frames_per_buffer; | 623 DVLOG(1) << "Using WebRTC output buffer size: " << source_frames_per_buffer; |
| 656 | 624 |
| 657 // Setup sink parameters. | 625 // Setup sink parameters. |
| 658 const int sink_frames_per_buffer = GetOptimalBufferSize( | 626 const int sink_frames_per_buffer = media::AudioLatency::GetRtcBufferSize( |
| 659 sample_rate, device_info.output_params().frames_per_buffer()); | 627 sample_rate, device_info.output_params().frames_per_buffer()); |
| 660 new_sink_params.set_sample_rate(sample_rate); | 628 new_sink_params.set_sample_rate(sample_rate); |
| 661 new_sink_params.set_frames_per_buffer(sink_frames_per_buffer); | 629 new_sink_params.set_frames_per_buffer(sink_frames_per_buffer); |
| 662 | 630 |
| 663 // Create a FIFO if re-buffering is required to match the source input with | 631 // Create a FIFO if re-buffering is required to match the source input with |
| 664 // the sink request. The source acts as provider here and the sink as | 632 // the sink request. The source acts as provider here and the sink as |
| 665 // consumer. | 633 // consumer. |
| 666 const bool different_source_sink_frames = | 634 const bool different_source_sink_frames = |
| 667 source_frames_per_buffer != new_sink_params.frames_per_buffer(); | 635 source_frames_per_buffer != new_sink_params.frames_per_buffer(); |
| 668 if (different_source_sink_frames) { | 636 if (different_source_sink_frames) { |
| (...skipping 10 matching lines...) |
| 679 base::Bind(&WebRtcAudioRenderer::SourceCallback, | 647 base::Bind(&WebRtcAudioRenderer::SourceCallback, |
| 680 base::Unretained(this)))); | 648 base::Unretained(this)))); |
| 681 } | 649 } |
| 682 sink_params_ = new_sink_params; | 650 sink_params_ = new_sink_params; |
| 683 } | 651 } |
| 684 | 652 |
| 685 sink_->Initialize(new_sink_params, this); | 653 sink_->Initialize(new_sink_params, this); |
| 686 } | 654 } |
| 687 | 655 |
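A minimal sketch of the decision that gates FIFO creation in the hunk above, phrased in terms of the media::AudioLatency::GetRtcBufferSize() call this change introduces; the helper name is hypothetical and a Chromium build environment is assumed.

```cpp
// Hypothetical helper (not part of this change): the WebRTC source always
// delivers 10 ms blocks, so re-buffering through a FIFO is only needed when
// the sink buffer size chosen by AudioLatency differs from one 10 ms block.
#include "media/base/audio_latency.h"

bool NeedsRebufferingFifo(int sample_rate, int hardware_buffer_size) {
  const int source_frames_per_buffer = sample_rate / 100;  // 10 ms of audio.
  const int sink_frames_per_buffer =
      media::AudioLatency::GetRtcBufferSize(sample_rate, hardware_buffer_size);
  return source_frames_per_buffer != sink_frames_per_buffer;
}
```

This preserves the intent of the removed GetOptimalBufferSize(): the sink follows the hardware-driven buffer size while the source stays at WebRTC's fixed 10 ms cadence, with a FIFO bridging the two only when they disagree.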
| 688 } // namespace content | 656 } // namespace content |