| OLD | NEW |
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "content/renderer/media/webrtc_audio_renderer.h" | 5 #include "content/renderer/media/webrtc_audio_renderer.h" |
| 6 | 6 |
| 7 #include "base/logging.h" | 7 #include "base/logging.h" |
| 8 #include "base/metrics/histogram.h" | 8 #include "base/metrics/histogram.h" |
| 9 #include "base/strings/string_util.h" | 9 #include "base/strings/string_util.h" |
| 10 #include "content/renderer/media/audio_device_factory.h" | 10 #include "content/renderer/media/audio_device_factory.h" |
| 11 #include "content/renderer/media/webrtc_audio_device_impl.h" | 11 #include "content/renderer/media/webrtc_audio_device_impl.h" |
| 12 #include "content/renderer/render_thread_impl.h" | |
| 13 #include "media/audio/audio_output_device.h" | 12 #include "media/audio/audio_output_device.h" |
| 14 #include "media/audio/audio_parameters.h" | 13 #include "media/audio/audio_parameters.h" |
| 15 #include "media/audio/sample_rates.h" | 14 #include "media/audio/sample_rates.h" |
| 16 #include "media/base/audio_hardware_config.h" | |
| 17 | 15 |
| 18 #if defined(OS_WIN) | 16 #if defined(OS_WIN) |
| 19 #include "base/win/windows_version.h" | 17 #include "base/win/windows_version.h" |
| 20 #include "media/audio/win/core_audio_util_win.h" | 18 #include "media/audio/win/core_audio_util_win.h" |
| 21 #endif | 19 #endif |
| 22 | 20 |
| 23 namespace content { | 21 namespace content { |
| 24 | 22 |
| 25 namespace { | 23 namespace { |
| 26 | 24 |
| (...skipping 56 matching lines...) |
| 83 UMA_HISTOGRAM_ENUMERATION("WebRTC.AudioOutputFramesPerBuffer", | 81 UMA_HISTOGRAM_ENUMERATION("WebRTC.AudioOutputFramesPerBuffer", |
| 84 afpb, kUnexpectedAudioBufferSize); | 82 afpb, kUnexpectedAudioBufferSize); |
| 85 } else { | 83 } else { |
| 86 // Report unexpected buffer sizes using a unique histogram name. | 84 // Report unexpected buffer sizes using a unique histogram name. |
| 87 UMA_HISTOGRAM_COUNTS("WebRTC.AudioOutputFramesPerBufferUnexpected", param); | 85 UMA_HISTOGRAM_COUNTS("WebRTC.AudioOutputFramesPerBufferUnexpected", param); |
| 88 } | 86 } |
| 89 } | 87 } |
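The pattern above, mapping each known value onto a dense enum for UMA_HISTOGRAM_ENUMERATION and diverting anything unrecognized to a separate counts histogram, looks roughly like the sketch below. All names here (KnownFramesPerBuffer, ReportFramesPerBuffer, the "Example.*" histogram titles) are hypothetical; the real enum and switch live in the lines skipped above.

    // Sketch only: hypothetical enum and helper, not the elided Chromium code.
    enum KnownFramesPerBuffer {
      kFrames256,
      kFrames441,
      kFrames480,
      kUnexpectedFramesPerBuffer  // Sentinel; must stay last for UMA.
    };

    void ReportFramesPerBuffer(int frames) {
      KnownFramesPerBuffer bucket = kUnexpectedFramesPerBuffer;
      switch (frames) {
        case 256: bucket = kFrames256; break;
        case 441: bucket = kFrames441; break;
        case 480: bucket = kFrames480; break;
      }
      if (bucket != kUnexpectedFramesPerBuffer) {
        // Stable, named buckets for the values we expect to see.
        UMA_HISTOGRAM_ENUMERATION("Example.FramesPerBuffer", bucket,
                                  kUnexpectedFramesPerBuffer);
      } else {
        // Outliers are logged raw under a separate name so they do not
        // pollute the enumerated histogram.
        UMA_HISTOGRAM_COUNTS("Example.FramesPerBufferUnexpected", frames);
      }
    }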
| 90 | 88 |
| 91 } // namespace | 89 } // namespace |
| 92 | 90 |
| 93 WebRtcAudioRenderer::WebRtcAudioRenderer(int source_render_view_id) | 91 WebRtcAudioRenderer::WebRtcAudioRenderer(int source_render_view_id, |
| 92 int session_id, |
| 93 int sample_rate, |
| 94 int frames_per_buffer) |
| 94 : state_(UNINITIALIZED), | 95 : state_(UNINITIALIZED), |
| 95 source_render_view_id_(source_render_view_id), | 96 source_render_view_id_(source_render_view_id), |
| 97 session_id_(session_id), |
| 96 source_(NULL), | 98 source_(NULL), |
| 97 play_ref_count_(0), | 99 play_ref_count_(0), |
| 98 audio_delay_milliseconds_(0), | 100 audio_delay_milliseconds_(0), |
| 99 fifo_delay_milliseconds_(0) { | 101 fifo_delay_milliseconds_(0), |
| 102 sample_rate_(sample_rate), |
| 103 frames_per_buffer_(frames_per_buffer) { |
| 100 } | 104 } |
| 101 | 105 |
| 102 WebRtcAudioRenderer::~WebRtcAudioRenderer() { | 106 WebRtcAudioRenderer::~WebRtcAudioRenderer() { |
| 103 DCHECK(thread_checker_.CalledOnValidThread()); | 107 DCHECK(thread_checker_.CalledOnValidThread()); |
| 104 DCHECK_EQ(state_, UNINITIALIZED); | 108 DCHECK_EQ(state_, UNINITIALIZED); |
| 105 buffer_.reset(); | 109 buffer_.reset(); |
| 106 } | 110 } |
| 107 | 111 |
| 108 bool WebRtcAudioRenderer::Initialize(WebRtcAudioRendererSource* source) { | 112 bool WebRtcAudioRenderer::Initialize(WebRtcAudioRendererSource* source) { |
| 109 DVLOG(1) << "WebRtcAudioRenderer::Initialize()"; | 113 DVLOG(1) << "WebRtcAudioRenderer::Initialize()"; |
| 110 DCHECK(thread_checker_.CalledOnValidThread()); | 114 DCHECK(thread_checker_.CalledOnValidThread()); |
| 111 base::AutoLock auto_lock(lock_); | 115 base::AutoLock auto_lock(lock_); |
| 112 DCHECK_EQ(state_, UNINITIALIZED); | 116 DCHECK_EQ(state_, UNINITIALIZED); |
| 113 DCHECK(source); | 117 DCHECK(source); |
| 114 DCHECK(!sink_.get()); | 118 DCHECK(!sink_.get()); |
| 115 DCHECK(!source_); | 119 DCHECK(!source_); |
| 116 | 120 |
| 117 // Use stereo output on all platforms except Android. | 121 // Use stereo output on all platforms except Android. |
| 118 media::ChannelLayout channel_layout = media::CHANNEL_LAYOUT_STEREO; | 122 media::ChannelLayout channel_layout = media::CHANNEL_LAYOUT_STEREO; |
| 119 #if defined(OS_ANDROID) | 123 #if defined(OS_ANDROID) |
| 120 DVLOG(1) << "Using mono audio output for Android"; | 124 DVLOG(1) << "Using mono audio output for Android"; |
| 121 channel_layout = media::CHANNEL_LAYOUT_MONO; | 125 channel_layout = media::CHANNEL_LAYOUT_MONO; |
| 122 #endif | 126 #endif |
| 123 // Ask the renderer for the default audio output hardware sample-rate. | 127 |
| 124 media::AudioHardwareConfig* hardware_config = | 128 // TODO(tommi,henrika): Maybe we should just make |sample_rate_| mutable |
| 125 RenderThreadImpl::current()->GetAudioHardwareConfig(); | 129 // and change its value instead of using a temporary? |
| 126 int sample_rate = hardware_config->GetOutputSampleRate(); | 130 int sample_rate = sample_rate_; |
| 127 DVLOG(1) << "Audio output hardware sample rate: " << sample_rate; | 131 DVLOG(1) << "Audio output hardware sample rate: " << sample_rate; |
| 128 | 132 |
| 129 // WebRTC does not yet support rates higher than 96000 on the client side | 133 // WebRTC does not yet support rates higher than 96000 on the client side |
| 130 // and 48000 is the preferred sample rate. Therefore, if 192000 is detected, | 134 // and 48000 is the preferred sample rate. Therefore, if 192000 is detected, |
| 131 // we change the rate to 48000 instead. The consequence is that the native | 135 // we change the rate to 48000 instead. The consequence is that the native |
| 132 // layer will be opened up at 192kHz but WebRTC will provide data at 48kHz | 136 // layer will be opened up at 192kHz but WebRTC will provide data at 48kHz |
| 133 // which will then be resampled by the audio converter on the browser side | 137 // which will then be resampled by the audio converter on the browser side |
| 134 // to match the native audio layer. | 138 // to match the native audio layer. |
| 135 if (sample_rate == 192000) { | 139 if (sample_rate == 192000) { |
| 136 DVLOG(1) << "Resampling from 48000 to 192000 is required"; | 140 DVLOG(1) << "Resampling from 48000 to 192000 is required"; |
| (...skipping 34 matching lines...) |
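For the record, the capping described above amounts to something like the following. This is a sketch under the assumption that the skipped lines assign 48000 back to the local variable; only the DVLOG is visible in this hunk.

    // Assumed shape of the clamp; the actual assignment is in the elided lines.
    if (sample_rate == 192000) {
      DVLOG(1) << "Resampling from 48000 to 192000 is required";
      sample_rate = 48000;  // WebRTC renders at 48 kHz; the browser-side
                            // converter resamples up to the native 192 kHz.
    }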
| 171 // We strive to open up using native parameters to achieve best possible | 175 // We strive to open up using native parameters to achieve best possible |
| 172 // performance and to ensure that no FIFO is needed on the browser side to | 176 // performance and to ensure that no FIFO is needed on the browser side to |
| 173 // match the client request. Any mismatch between the source and the sink is | 177 // match the client request. Any mismatch between the source and the sink is |
| 174 // taken care of in this class instead using a pull FIFO. | 178 // taken care of in this class instead using a pull FIFO. |
| 175 | 179 |
| 176 media::AudioParameters sink_params; | 180 media::AudioParameters sink_params; |
| 177 | 181 |
| 178 #if defined(OS_ANDROID) | 182 #if defined(OS_ANDROID) |
| 179 buffer_size = kDefaultOutputBufferSize; | 183 buffer_size = kDefaultOutputBufferSize; |
| 180 #else | 184 #else |
| 181 buffer_size = hardware_config->GetOutputBufferSize(); | 185 buffer_size = frames_per_buffer_; |
| 182 #endif | 186 #endif |
| 183 | 187 |
| 184 sink_params.Reset(media::AudioParameters::AUDIO_PCM_LOW_LATENCY, | 188 sink_params.Reset(media::AudioParameters::AUDIO_PCM_LOW_LATENCY, |
| 185 channel_layout, channels, 0, sample_rate, 16, buffer_size); | 189 channel_layout, channels, 0, sample_rate, 16, buffer_size); |
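A side note on the Reset() call above: the bare 0 fills the input_channels slot of media::AudioParameters, presumably irrelevant here since this renderer opens an output-only stream.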
| 186 | 190 |
| 187 // Create a FIFO if re-buffering is required to match the source input with | 191 // Create a FIFO if re-buffering is required to match the source input with |
| 188 // the sink request. The source acts as provider here and the sink as | 192 // the sink request. The source acts as provider here and the sink as |
| 189 // consumer. | 193 // consumer. |
| 190 fifo_delay_milliseconds_ = 0; | 194 fifo_delay_milliseconds_ = 0; |
| 191 if (source_params.frames_per_buffer() != sink_params.frames_per_buffer()) { | 195 if (source_params.frames_per_buffer() != sink_params.frames_per_buffer()) { |
| 192 DVLOG(1) << "Rebuffering from " << source_params.frames_per_buffer() | 196 DVLOG(1) << "Rebuffering from " << source_params.frames_per_buffer() |
| 193 << " to " << sink_params.frames_per_buffer(); | 197 << " to " << sink_params.frames_per_buffer(); |
| 194 audio_fifo_.reset(new media::AudioPullFifo( | 198 audio_fifo_.reset(new media::AudioPullFifo( |
| 195 source_params.channels(), | 199 source_params.channels(), |
| 196 source_params.frames_per_buffer(), | 200 source_params.frames_per_buffer(), |
| 197 base::Bind( | 201 base::Bind( |
| 198 &WebRtcAudioRenderer::SourceCallback, | 202 &WebRtcAudioRenderer::SourceCallback, |
| 199 base::Unretained(this)))); | 203 base::Unretained(this)))); |
| 200 | 204 |
| 201 if (sink_params.frames_per_buffer() > source_params.frames_per_buffer()) { | 205 if (sink_params.frames_per_buffer() > source_params.frames_per_buffer()) { |
| 202 double frame_duration_milliseconds = base::Time::kMillisecondsPerSecond / | 206 double frame_duration_milliseconds = base::Time::kMillisecondsPerSecond / |
| 203 static_cast<double>(source_params.sample_rate()); | 207 static_cast<double>(source_params.sample_rate()); |
| 204 fifo_delay_milliseconds_ = (sink_params.frames_per_buffer() - | 208 fifo_delay_milliseconds_ = (sink_params.frames_per_buffer() - |
| 205 source_params.frames_per_buffer()) * frame_duration_milliseconds; | 209 source_params.frames_per_buffer()) * frame_duration_milliseconds; |
| 206 } | 210 } |
| 207 } | 211 } |
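As a worked example of the delay bookkeeping above: a 44.1 kHz source producing 441-frame buffers feeding a sink that asks for 882 frames gives (882 - 441) * 1000 / 44100, i.e. an extra 10 ms of FIFO delay. When the sink requests fewer frames than the source provides, no delay is added.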
| 208 | 212 |
| 209 | |
| 210 // Allocate local audio buffers based on the parameters above. | 213 // Allocate local audio buffers based on the parameters above. |
| 211 // It is assumed that each audio sample contains 16 bits and each | 214 // It is assumed that each audio sample contains 16 bits and each |
| 212 // audio frame contains one or two audio samples depending on the | 215 // audio frame contains one or two audio samples depending on the |
| 213 // number of channels. | 216 // number of channels. |
| 214 buffer_.reset( | 217 buffer_.reset( |
| 215 new int16[source_params.frames_per_buffer() * source_params.channels()]); | 218 new int16[source_params.frames_per_buffer() * source_params.channels()]); |
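So, for instance, a 480-frame stereo source buffer allocates 480 * 2 = 960 int16 samples, i.e. 1920 bytes per callback.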
| 216 | 219 |
| 217 source_ = source; | 220 source_ = source; |
| 218 source->SetRenderFormat(source_params); | 221 source->SetRenderFormat(source_params); |
| 219 | 222 |
| 220 // Configure the audio rendering client and start rendering. | 223 // Configure the audio rendering client and start rendering. |
| 221 sink_ = AudioDeviceFactory::NewOutputDevice(source_render_view_id_); | 224 sink_ = AudioDeviceFactory::NewOutputDevice(source_render_view_id_); |
| 222 sink_->Initialize(sink_params, this); | 225 |
| 226 // TODO(tommi): Rename InitializeUnifiedStream to better reflect its |
| 227 // association with a session. |
| 228 DCHECK_GE(session_id_, 0); |
| 229 sink_->InitializeUnifiedStream(sink_params, this, session_id_); |
| 230 |
| 223 sink_->Start(); | 231 sink_->Start(); |
| 224 | 232 |
| 225 // User must call Play() before any audio can be heard. | 233 // User must call Play() before any audio can be heard. |
| 226 state_ = PAUSED; | 234 state_ = PAUSED; |
| 227 | 235 |
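From the client's perspective, the startup sequence implied here is roughly the sketch below. The view_id and session_id values are placeholders, Play() is taken from the comment above, and the refcounted pause/stop logic lives outside this hunk.

    // Hypothetical caller, assuming the constructor signature from this CL.
    scoped_refptr<WebRtcAudioRenderer> renderer(new WebRtcAudioRenderer(
        view_id, session_id, 48000 /* sample_rate */,
        480 /* frames_per_buffer */));
    if (renderer->Initialize(source)) {
      // Initialize() opened the sink and called sink_->Start(), but the
      // state is PAUSED: nothing is audible until Play().
      renderer->Play();
    }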
| 228 UMA_HISTOGRAM_ENUMERATION("WebRTC.AudioOutputChannelLayout", | 236 UMA_HISTOGRAM_ENUMERATION("WebRTC.AudioOutputChannelLayout", |
| 229 source_params.channel_layout(), | 237 source_params.channel_layout(), |
| 230 media::CHANNEL_LAYOUT_MAX); | 238 media::CHANNEL_LAYOUT_MAX); |
| 231 UMA_HISTOGRAM_ENUMERATION("WebRTC.AudioOutputFramesPerBuffer", | 239 UMA_HISTOGRAM_ENUMERATION("WebRTC.AudioOutputFramesPerBuffer", |
| 232 source_params.frames_per_buffer(), | 240 source_params.frames_per_buffer(), |
| (...skipping 117 matching lines...) |
| 350 } | 358 } |
| 351 | 359 |
| 352 // De-interleave each channel and convert to 32-bit floating-point | 360 // De-interleave each channel and convert to 32-bit floating-point |
| 353 // with nominal range -1.0 -> +1.0 to match the callback format. | 361 // with nominal range -1.0 -> +1.0 to match the callback format. |
| 354 audio_bus->FromInterleaved(buffer_.get(), | 362 audio_bus->FromInterleaved(buffer_.get(), |
| 355 audio_bus->frames(), | 363 audio_bus->frames(), |
| 356 sizeof(buffer_[0])); | 364 sizeof(buffer_[0])); |
| 357 } | 365 } |
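For reference, the conversion performed by FromInterleaved() is, in spirit, the following simplified sketch; the real implementation in media::AudioBus uses slightly different scaling constants for the positive and negative ranges.

    // Simplified: interleaved int16 L0 R0 L1 R1 ... becomes one planar
    // float array per channel with nominal range [-1.0, +1.0].
    for (int ch = 0; ch < audio_bus->channels(); ++ch) {
      float* dest = audio_bus->channel(ch);
      for (int i = 0; i < audio_bus->frames(); ++i) {
        int16 sample = buffer_[i * audio_bus->channels() + ch];
        dest[i] = sample / 32768.0f;
      }
    }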
| 358 | 366 |
| 359 } // namespace content | 367 } // namespace content |