OLD | NEW |
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "content/renderer/media/webrtc_audio_renderer.h" | 5 #include "content/renderer/media/webrtc_audio_renderer.h" |
6 | 6 |
7 #include "base/logging.h" | 7 #include "base/logging.h" |
8 #include "base/metrics/histogram.h" | 8 #include "base/metrics/histogram.h" |
9 #include "base/strings/string_util.h" | 9 #include "base/strings/string_util.h" |
10 #include "base/strings/stringprintf.h" | 10 #include "base/strings/stringprintf.h" |
(...skipping 204 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
215 // WebRTC does not yet support higher rates than 96000 on the client side | 215 // WebRTC does not yet support higher rates than 96000 on the client side |
216 // and 48000 is the preferred sample rate. Therefore, if 192000 is detected, | 216 // and 48000 is the preferred sample rate. Therefore, if 192000 is detected, |
217 // we change the rate to 48000 instead. The consequence is that the native | 217 // we change the rate to 48000 instead. The consequence is that the native |
218 // layer will be opened up at 192kHz but WebRTC will provide data at 48kHz | 218 // layer will be opened up at 192kHz but WebRTC will provide data at 48kHz |
219 // which will then be resampled by the audio converter on the browser side | 219 // which will then be resampled by the audio converter on the browser side |
220 // to match the native audio layer. | 220 // to match the native audio layer. |
221 if (sample_rate == 192000) { | 221 if (sample_rate == 192000) { |
222 DVLOG(1) << "Resampling from 48000 to 192000 is required"; | 222 DVLOG(1) << "Resampling from 48000 to 192000 is required"; |
223 sample_rate = 48000; | 223 sample_rate = 48000; |
224 } | 224 } |
225 media::AudioSampleRate asr = media::AsAudioSampleRate(sample_rate); | 225 media::AudioSampleRate asr; |
226 if (asr != media::kUnexpectedAudioSampleRate) { | 226 if (media::ToAudioSampleRate(sample_rate, &asr)) { |
227 UMA_HISTOGRAM_ENUMERATION( | 227 UMA_HISTOGRAM_ENUMERATION( |
228 "WebRTC.AudioOutputSampleRate", asr, media::kUnexpectedAudioSampleRate); | 228 "WebRTC.AudioOutputSampleRate", asr, media::kAudioSampleRateMax + 1); |
229 } else { | 229 } else { |
230 UMA_HISTOGRAM_COUNTS("WebRTC.AudioOutputSampleRateUnexpected", sample_rate); | 230 UMA_HISTOGRAM_COUNTS("WebRTC.AudioOutputSampleRateUnexpected", sample_rate); |
231 } | 231 } |
232 | 232 |
233 // Verify that the reported output hardware sample rate is supported | 233 // Verify that the reported output hardware sample rate is supported |
234 // on the current platform. | 234 // on the current platform. |
235 if (std::find(&kValidOutputRates[0], | 235 if (std::find(&kValidOutputRates[0], |
236 &kValidOutputRates[0] + arraysize(kValidOutputRates), | 236 &kValidOutputRates[0] + arraysize(kValidOutputRates), |
237 sample_rate) == | 237 sample_rate) == |
238 &kValidOutputRates[arraysize(kValidOutputRates)]) { | 238 &kValidOutputRates[arraysize(kValidOutputRates)]) { |
(...skipping 84 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
323 DCHECK_GE(session_id_, 0); | 323 DCHECK_GE(session_id_, 0); |
324 sink_->InitializeUnifiedStream(sink_params, this, session_id_); | 324 sink_->InitializeUnifiedStream(sink_params, this, session_id_); |
325 | 325 |
326 sink_->Start(); | 326 sink_->Start(); |
327 | 327 |
328 // User must call Play() before any audio can be heard. | 328 // User must call Play() before any audio can be heard. |
329 state_ = PAUSED; | 329 state_ = PAUSED; |
330 | 330 |
331 UMA_HISTOGRAM_ENUMERATION("WebRTC.AudioOutputChannelLayout", | 331 UMA_HISTOGRAM_ENUMERATION("WebRTC.AudioOutputChannelLayout", |
332 source_params.channel_layout(), | 332 source_params.channel_layout(), |
333 media::CHANNEL_LAYOUT_MAX); | 333 media::CHANNEL_LAYOUT_MAX + 1); |
334 UMA_HISTOGRAM_ENUMERATION("WebRTC.AudioOutputFramesPerBuffer", | 334 UMA_HISTOGRAM_ENUMERATION("WebRTC.AudioOutputFramesPerBuffer", |
335 source_params.frames_per_buffer(), | 335 source_params.frames_per_buffer(), |
336 kUnexpectedAudioBufferSize); | 336 kUnexpectedAudioBufferSize); |
337 AddHistogramFramesPerBuffer(source_params.frames_per_buffer()); | 337 AddHistogramFramesPerBuffer(source_params.frames_per_buffer()); |
338 | 338 |
339 return true; | 339 return true; |
340 } | 340 } |
341 | 341 |
342 scoped_refptr<MediaStreamAudioRenderer> | 342 scoped_refptr<MediaStreamAudioRenderer> |
343 WebRtcAudioRenderer::CreateSharedAudioRendererProxy() { | 343 WebRtcAudioRenderer::CreateSharedAudioRendererProxy() { |
(...skipping 136 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
480 } | 480 } |
481 | 481 |
482 // De-interleave each channel and convert to 32-bit floating-point | 482 // De-interleave each channel and convert to 32-bit floating-point |
483 // with nominal range -1.0 -> +1.0 to match the callback format. | 483 // with nominal range -1.0 -> +1.0 to match the callback format. |
484 audio_bus->FromInterleaved(buffer_.get(), | 484 audio_bus->FromInterleaved(buffer_.get(), |
485 audio_bus->frames(), | 485 audio_bus->frames(), |
486 sizeof(buffer_[0])); | 486 sizeof(buffer_[0])); |
487 } | 487 } |
488 | 488 |
489 } // namespace content | 489 } // namespace content |
OLD | NEW |