Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "content/renderer/media/webrtc_audio_renderer.h" | 5 #include "content/renderer/media/webrtc_audio_renderer.h" |
| 6 | 6 |
| 7 #include "base/logging.h" | 7 #include "base/logging.h" |
| 8 #include "base/metrics/histogram.h" | 8 #include "base/metrics/histogram.h" |
| 9 #include "base/strings/string_util.h" | 9 #include "base/strings/string_util.h" |
| 10 #include "base/strings/stringprintf.h" | 10 #include "base/strings/stringprintf.h" |
| (...skipping 29 matching lines...) Expand all Loading... | |
| 40 const int kValidOutputRates[] = {48000, 44100}; | 40 const int kValidOutputRates[] = {48000, 44100}; |
| 41 #elif defined(OS_ANDROID) | 41 #elif defined(OS_ANDROID) |
| 42 // TODO(leozwang): We want to use native sampling rate on Android to achieve | 42 // TODO(leozwang): We want to use native sampling rate on Android to achieve |
| 43 // low latency, currently 16000 is used to work around audio problem on some | 43 // low latency, currently 16000 is used to work around audio problem on some |
| 44 // Android devices. | 44 // Android devices. |
| 45 const int kValidOutputRates[] = {48000, 44100, 16000}; | 45 const int kValidOutputRates[] = {48000, 44100, 16000}; |
| 46 #else | 46 #else |
| 47 const int kValidOutputRates[] = {44100}; | 47 const int kValidOutputRates[] = {44100}; |
| 48 #endif | 48 #endif |
| 49 | 49 |
| 50 // TODO(xians): Merge the following code to WebRtcAudioCapturer, or remove. | |
| 51 enum AudioFramesPerBuffer { | |
| 52 k160, | |
| 53 k320, | |
| 54 k440, | |
| 55 k480, | |
| 56 k640, | |
| 57 k880, | |
| 58 k960, | |
| 59 k1440, | |
| 60 k1920, | |
| 61 kUnexpectedAudioBufferSize // Must always be last! | |
| 62 }; | |
| 63 | |
| 64 // Helper method to convert integral values to their respective enum values | |
| 65 // above, or kUnexpectedAudioBufferSize if no match exists. | |
| 66 // We map 441 to k440 to avoid changes in the XML part for histograms. | |
| 67 // It is still possible to map the histogram result to the actual buffer size. | |
| 68 // See http://crbug.com/243450 for details. | |
| 69 AudioFramesPerBuffer AsAudioFramesPerBuffer(int frames_per_buffer) { | |
| 70 switch (frames_per_buffer) { | |
| 71 case 160: return k160; | |
| 72 case 320: return k320; | |
| 73 case 441: return k440; | |
| 74 case 480: return k480; | |
| 75 case 640: return k640; | |
| 76 case 880: return k880; | |
| 77 case 960: return k960; | |
| 78 case 1440: return k1440; | |
| 79 case 1920: return k1920; | |
| 80 } | |
| 81 return kUnexpectedAudioBufferSize; | |
| 82 } | |
| 83 | |
| 84 void AddHistogramFramesPerBuffer(int param) { | |
| 85 AudioFramesPerBuffer afpb = AsAudioFramesPerBuffer(param); | |
| 86 if (afpb != kUnexpectedAudioBufferSize) { | |
| 87 UMA_HISTOGRAM_ENUMERATION("WebRTC.AudioOutputFramesPerBuffer", | |
| 88 afpb, kUnexpectedAudioBufferSize); | |
| 89 } else { | |
| 90 // Report unexpected buffer sizes using a unique histogram name. | |
| 91 UMA_HISTOGRAM_COUNTS("WebRTC.AudioOutputFramesPerBufferUnexpected", param); | |
| 92 } | |
| 93 } | |
| 94 | |
| 95 // This is a simple wrapper class that's handed out to users of a shared | 50 // This is a simple wrapper class that's handed out to users of a shared |
| 96 // WebRtcAudioRenderer instance. This class maintains the per-user 'playing' | 51 // WebRtcAudioRenderer instance. This class maintains the per-user 'playing' |
| 97 // and 'started' states to avoid problems related to incorrect usage which | 52 // and 'started' states to avoid problems related to incorrect usage which |
| 98 // might violate the implementation assumptions inside WebRtcAudioRenderer | 53 // might violate the implementation assumptions inside WebRtcAudioRenderer |
| 99 // (see the play reference count). | 54 // (see the play reference count). |
| 100 class SharedAudioRenderer : public MediaStreamAudioRenderer { | 55 class SharedAudioRenderer : public MediaStreamAudioRenderer { |
| 101 public: | 56 public: |
| 102 // Callback definition for a callback that is called when Play(), Pause() | 57 // Callback definition for a callback that is called when Play(), Pause() |
| 103 // or SetVolume are called (whenever the internal |playing_state_| changes). | 58 // or SetVolume are called (whenever the internal |playing_state_| changes). |
| 104 typedef base::Callback< | 59 typedef base::Callback< |
| (...skipping 86 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 191 RenderFrameImpl::FromRoutingID(render_frame_id); | 146 RenderFrameImpl::FromRoutingID(render_frame_id); |
| 192 MediaStreamDispatcher* const dispatcher = frame ? | 147 MediaStreamDispatcher* const dispatcher = frame ? |
| 193 frame->GetMediaStreamDispatcher() : NULL; | 148 frame->GetMediaStreamDispatcher() : NULL; |
| 194 if (dispatcher && dispatcher->IsAudioDuckingActive()) { | 149 if (dispatcher && dispatcher->IsAudioDuckingActive()) { |
| 195 return media::AudioParameters::DUCKING; | 150 return media::AudioParameters::DUCKING; |
| 196 } | 151 } |
| 197 | 152 |
| 198 return media::AudioParameters::NO_EFFECTS; | 153 return media::AudioParameters::NO_EFFECTS; |
| 199 } | 154 } |
| 200 | 155 |
| 156 // Helper method to get platform specific optimal buffer size. | |
| 157 int GetOptimalBufferSize(int sample_rate, int hardware_buffer_size) { | |
| 158 // Use native hardware buffer size as default. | |
| 159 int frames_per_buffer = hardware_buffer_size; | |
| 160 | |
| 161 // On Windows, we strive to open up using native parameters to achieve best | |
|
tommi (sloooow) - chröme
2014/09/03 13:22:11
this comment is a bit confusing since there's no #
no longer working on chromium
2014/09/03 14:34:50
I moved the comment up and explained by we don't h
| |
| 162 // possible performance and to ensure that no FIFO is needed on the browser | |
| 163 // side to match the client request. | |
| 164 | |
| 165 #if defined(OS_LINUX) || defined(OS_MACOSX) | |
| 166 // On Linux and MacOS, the low level IO implementations on the browser side | |
| 167 // supports all buffer sizes the clients want. We use the native peer | |
| 168 // connection buffer size (10ms) to achieve best possible performance. | |
| 169 frames_per_buffer = sample_rate / 100; | |
| 170 #elif defined(OS_ANDROID) | |
| 171 // TODO(henrika): Keep tuning this scheme and especially for low-latency | |
| 172 // cases. Might not be possible to come up with the perfect solution using | |
|
tommi (sloooow) - chröme
2014/09/03 13:22:11
fix indenting here and the next line
no longer working on chromium
2014/09/03 14:34:50
Done.
| |
| 173 // the render side only. | |
| 174 if (frames_per_buffer < 2 * frames_per_10ms) { | |
| 175 // Examples of low-latency frame sizes and the resulting |buffer_size|: | |
| 176 // Nexus 7 : 240 audio frames => 2*480 = 960 | |
| 177 // Nexus 10 : 256 => 2*441 = 882 | |
| 178 // Galaxy Nexus: 144 => 2*441 = 882 | |
| 179 frames_per_buffer = 2 * frames_per_10ms; | |
| 180 DVLOG(1) << "Low-latency output detected on Android"; | |
| 181 } | |
| 182 #endif | |
| 183 | |
| 184 DVLOG(1) << "Using sink output buffer size: " << frames_per_buffer; | |
| 185 return frames_per_buffer; | |
| 186 } | |
| 187 | |
| 201 } // namespace | 188 } // namespace |
| 202 | 189 |
| 203 WebRtcAudioRenderer::WebRtcAudioRenderer( | 190 WebRtcAudioRenderer::WebRtcAudioRenderer( |
| 204 const scoped_refptr<webrtc::MediaStreamInterface>& media_stream, | 191 const scoped_refptr<webrtc::MediaStreamInterface>& media_stream, |
| 205 int source_render_view_id, | 192 int source_render_view_id, |
| 206 int source_render_frame_id, | 193 int source_render_frame_id, |
| 207 int session_id, | 194 int session_id, |
| 208 int sample_rate, | 195 int sample_rate, |
| 209 int frames_per_buffer) | 196 int frames_per_buffer) |
| 210 : state_(UNINITIALIZED), | 197 : state_(UNINITIALIZED), |
| (...skipping 70 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 281 // The WebRTC client only supports multiples of 10ms as buffer size where | 268 // The WebRTC client only supports multiples of 10ms as buffer size where |
| 282 // 10ms is preferred for lowest possible delay. | 269 // 10ms is preferred for lowest possible delay. |
| 283 media::AudioParameters source_params; | 270 media::AudioParameters source_params; |
| 284 const int frames_per_10ms = (sample_rate / 100); | 271 const int frames_per_10ms = (sample_rate / 100); |
| 285 DVLOG(1) << "Using WebRTC output buffer size: " << frames_per_10ms; | 272 DVLOG(1) << "Using WebRTC output buffer size: " << frames_per_10ms; |
| 286 | 273 |
| 287 source_params.Reset(media::AudioParameters::AUDIO_PCM_LOW_LATENCY, | 274 source_params.Reset(media::AudioParameters::AUDIO_PCM_LOW_LATENCY, |
| 288 sink_params_.channel_layout(), sink_params_.channels(), | 275 sink_params_.channel_layout(), sink_params_.channels(), |
| 289 sample_rate, 16, frames_per_10ms); | 276 sample_rate, 16, frames_per_10ms); |
| 290 | 277 |
| 291 // Update audio parameters for the sink, i.e., the native audio output stream. | 278 const int frames_per_buffer = |
|
henrika (OOO until Aug 14)
2014/09/03 12:43:58
DVLOG could be useful here.
no longer working on chromium
2014/09/03 12:49:26
There has already been one DVLOG in the end of Get
| |
| 292 // We strive to open up using native parameters to achieve best possible | 279 GetOptimalBufferSize(sample_rate, sink_params_.frames_per_buffer()); |
| 293 // performance and to ensure that no FIFO is needed on the browser side to | |
| 294 // match the client request. Any mismatch between the source and the sink is | |
| 295 // taken care of in this class instead using a pull FIFO. | |
| 296 | |
| 297 // Use native output size as default. | |
| 298 int frames_per_buffer = sink_params_.frames_per_buffer(); | |
| 299 #if defined(OS_ANDROID) | |
| 300 // TODO(henrika): Keep tuning this scheme and especially for low-latency | |
| 301 // cases. Might not be possible to come up with the perfect solution using | |
| 302 // the render side only. | |
| 303 if (frames_per_buffer < 2 * frames_per_10ms) { | |
| 304 // Examples of low-latency frame sizes and the resulting |buffer_size|: | |
| 305 // Nexus 7 : 240 audio frames => 2*480 = 960 | |
| 306 // Nexus 10 : 256 => 2*441 = 882 | |
| 307 // Galaxy Nexus: 144 => 2*441 = 882 | |
| 308 frames_per_buffer = 2 * frames_per_10ms; | |
| 309 DVLOG(1) << "Low-latency output detected on Android"; | |
| 310 } | |
| 311 #endif | |
| 312 DVLOG(1) << "Using sink output buffer size: " << frames_per_buffer; | |
| 313 | 280 |
| 314 sink_params_.Reset(sink_params_.format(), sink_params_.channel_layout(), | 281 sink_params_.Reset(sink_params_.format(), sink_params_.channel_layout(), |
| 315 sink_params_.channels(), sample_rate, 16, | 282 sink_params_.channels(), sample_rate, 16, |
| 316 frames_per_buffer); | 283 frames_per_buffer); |
| 317 | 284 |
| 318 // Create a FIFO if re-buffering is required to match the source input with | 285 // Create a FIFO if re-buffering is required to match the source input with |
| 319 // the sink request. The source acts as provider here and the sink as | 286 // the sink request. The source acts as provider here and the sink as |
| 320 // consumer. | 287 // consumer. |
| 321 fifo_delay_milliseconds_ = 0; | 288 fifo_delay_milliseconds_ = 0; |
| 322 if (source_params.frames_per_buffer() != sink_params_.frames_per_buffer()) { | 289 if (source_params.frames_per_buffer() != sink_params_.frames_per_buffer()) { |
| (...skipping 21 matching lines...) Expand all Loading... | |
| 344 source_render_view_id_, source_render_frame_id_); | 311 source_render_view_id_, source_render_frame_id_); |
| 345 | 312 |
| 346 DCHECK_GE(session_id_, 0); | 313 DCHECK_GE(session_id_, 0); |
| 347 sink_->InitializeWithSessionId(sink_params_, this, session_id_); | 314 sink_->InitializeWithSessionId(sink_params_, this, session_id_); |
| 348 | 315 |
| 349 sink_->Start(); | 316 sink_->Start(); |
| 350 | 317 |
| 351 // User must call Play() before any audio can be heard. | 318 // User must call Play() before any audio can be heard. |
| 352 state_ = PAUSED; | 319 state_ = PAUSED; |
| 353 | 320 |
| 354 UMA_HISTOGRAM_ENUMERATION("WebRTC.AudioOutputFramesPerBuffer", | |
| 355 source_params.frames_per_buffer(), | |
| 356 kUnexpectedAudioBufferSize); | |
| 357 AddHistogramFramesPerBuffer(source_params.frames_per_buffer()); | |
| 358 | |
| 359 return true; | 321 return true; |
| 360 } | 322 } |
| 361 | 323 |
| 362 scoped_refptr<MediaStreamAudioRenderer> | 324 scoped_refptr<MediaStreamAudioRenderer> |
| 363 WebRtcAudioRenderer::CreateSharedAudioRendererProxy( | 325 WebRtcAudioRenderer::CreateSharedAudioRendererProxy( |
| 364 const scoped_refptr<webrtc::MediaStreamInterface>& media_stream) { | 326 const scoped_refptr<webrtc::MediaStreamInterface>& media_stream) { |
| 365 content::SharedAudioRenderer::OnPlayStateChanged on_play_state_changed = | 327 content::SharedAudioRenderer::OnPlayStateChanged on_play_state_changed = |
| 366 base::Bind(&WebRtcAudioRenderer::OnPlayStateChanged, this); | 328 base::Bind(&WebRtcAudioRenderer::OnPlayStateChanged, this); |
| 367 return new SharedAudioRenderer(this, media_stream, on_play_state_changed); | 329 return new SharedAudioRenderer(this, media_stream, on_play_state_changed); |
| 368 } | 330 } |
| (...skipping 234 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 603 if (RemovePlayingState(source, state)) | 565 if (RemovePlayingState(source, state)) |
| 604 EnterPauseState(); | 566 EnterPauseState(); |
| 605 } else if (AddPlayingState(source, state)) { | 567 } else if (AddPlayingState(source, state)) { |
| 606 EnterPlayState(); | 568 EnterPlayState(); |
| 607 } | 569 } |
| 608 UpdateSourceVolume(source); | 570 UpdateSourceVolume(source); |
| 609 } | 571 } |
| 610 } | 572 } |
| 611 | 573 |
| 612 } // namespace content | 574 } // namespace content |
| OLD | NEW |