OLD | NEW |
---|---|
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "content/renderer/media/webrtc_audio_renderer.h" | 5 #include "content/renderer/media/webrtc_audio_renderer.h" |
6 | 6 |
7 #include "base/logging.h" | 7 #include "base/logging.h" |
8 #include "base/metrics/histogram.h" | 8 #include "base/metrics/histogram.h" |
9 #include "base/strings/string_util.h" | 9 #include "base/strings/string_util.h" |
10 #include "content/renderer/media/audio_device_factory.h" | 10 #include "content/renderer/media/audio_device_factory.h" |
11 #include "content/renderer/media/webrtc_audio_device_impl.h" | 11 #include "content/renderer/media/webrtc_audio_device_impl.h" |
12 #include "content/renderer/render_thread_impl.h" | |
13 #include "media/audio/audio_output_device.h" | 12 #include "media/audio/audio_output_device.h" |
14 #include "media/audio/audio_parameters.h" | 13 #include "media/audio/audio_parameters.h" |
15 #include "media/audio/sample_rates.h" | 14 #include "media/audio/sample_rates.h" |
16 #include "media/base/audio_hardware_config.h" | |
17 | 15 |
18 #if defined(OS_WIN) | 16 #if defined(OS_WIN) |
19 #include "base/win/windows_version.h" | 17 #include "base/win/windows_version.h" |
20 #include "media/audio/win/core_audio_util_win.h" | 18 #include "media/audio/win/core_audio_util_win.h" |
21 #endif | 19 #endif |
22 | 20 |
23 namespace content { | 21 namespace content { |
24 | 22 |
25 namespace { | 23 namespace { |
26 | 24 |
(...skipping 56 matching lines...) | |
83 UMA_HISTOGRAM_ENUMERATION("WebRTC.AudioOutputFramesPerBuffer", | 81 UMA_HISTOGRAM_ENUMERATION("WebRTC.AudioOutputFramesPerBuffer", |
84 afpb, kUnexpectedAudioBufferSize); | 82 afpb, kUnexpectedAudioBufferSize); |
85 } else { | 83 } else { |
86 // Report unexpected buffer sizes using a unique histogram name. | 84 // Report unexpected buffer sizes using a unique histogram name. |
87 UMA_HISTOGRAM_COUNTS("WebRTC.AudioOutputFramesPerBufferUnexpected", param); | 85 UMA_HISTOGRAM_COUNTS("WebRTC.AudioOutputFramesPerBufferUnexpected", param); |
88 } | 86 } |
89 } | 87 } |
90 | 88 |
91 } // namespace | 89 } // namespace |
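The helper above follows a common UMA pattern: values from a known set go to an enumerated histogram, and anything else is routed to a separate counts histogram so outliers stay visible without widening the enum. A minimal sketch of that routing, with an illustrative two-value mapping (the CL's full enum sits in the elided lines, so the mapping here is an assumption):

```cpp
#include "base/metrics/histogram.h"

// Illustrative enum; the real mapping lives in the lines elided above.
enum AudioFramesPerBuffer { k440 = 0, k480, kUnexpectedAudioBufferSize };

void ReportFramesPerBuffer(int frames) {
  if (frames == 440 || frames == 480) {
    // Known sizes land in fixed enum buckets.
    UMA_HISTOGRAM_ENUMERATION("WebRTC.AudioOutputFramesPerBuffer",
                              frames == 440 ? k440 : k480,
                              kUnexpectedAudioBufferSize);
  } else {
    // Unexpected sizes get their own histogram so they can be spotted.
    UMA_HISTOGRAM_COUNTS("WebRTC.AudioOutputFramesPerBufferUnexpected",
                         frames);
  }
}
```

This only compiles inside the Chromium tree, since the macros come from base/metrics.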
92 | 90 |
93 WebRtcAudioRenderer::WebRtcAudioRenderer(int source_render_view_id) | 91 WebRtcAudioRenderer::WebRtcAudioRenderer(int source_render_view_id, |
92 int session_id, | |
93 int sample_rate, | |
94 int frames_per_buffer) | |
94 : state_(UNINITIALIZED), | 95 : state_(UNINITIALIZED), |
95 source_render_view_id_(source_render_view_id), | 96 source_render_view_id_(source_render_view_id), |
97 session_id_(session_id), | |
96 source_(NULL), | 98 source_(NULL), |
97 play_ref_count_(0), | 99 play_ref_count_(0), |
98 audio_delay_milliseconds_(0), | 100 audio_delay_milliseconds_(0), |
99 fifo_delay_milliseconds_(0) { | 101 fifo_delay_milliseconds_(0), |
102 sample_rate_(sample_rate), | |
103 frames_per_buffer_(frames_per_buffer) { | |
100 } | 104 } |
101 | 105 |
102 WebRtcAudioRenderer::~WebRtcAudioRenderer() { | 106 WebRtcAudioRenderer::~WebRtcAudioRenderer() { |
103 DCHECK(thread_checker_.CalledOnValidThread()); | 107 DCHECK(thread_checker_.CalledOnValidThread()); |
104 DCHECK_EQ(state_, UNINITIALIZED); | 108 DCHECK_EQ(state_, UNINITIALIZED); |
105 buffer_.reset(); | 109 buffer_.reset(); |
106 } | 110 } |
107 | 111 |
108 bool WebRtcAudioRenderer::Initialize(WebRtcAudioRendererSource* source) { | 112 bool WebRtcAudioRenderer::Initialize(WebRtcAudioRendererSource* source) { |
109 DVLOG(1) << "WebRtcAudioRenderer::Initialize()"; | 113 DVLOG(1) << "WebRtcAudioRenderer::Initialize()"; |
110 DCHECK(thread_checker_.CalledOnValidThread()); | 114 DCHECK(thread_checker_.CalledOnValidThread()); |
111 base::AutoLock auto_lock(lock_); | 115 base::AutoLock auto_lock(lock_); |
112 DCHECK_EQ(state_, UNINITIALIZED); | 116 DCHECK_EQ(state_, UNINITIALIZED); |
113 DCHECK(source); | 117 DCHECK(source); |
114 DCHECK(!sink_.get()); | 118 DCHECK(!sink_.get()); |
115 DCHECK(!source_); | 119 DCHECK(!source_); |
116 | 120 |
117 // Use stereo output on all platforms except Android. | 121 // Use stereo output on all platforms except Android. |
118 media::ChannelLayout channel_layout = media::CHANNEL_LAYOUT_STEREO; | 122 media::ChannelLayout channel_layout = media::CHANNEL_LAYOUT_STEREO; |
119 #if defined(OS_ANDROID) | 123 #if defined(OS_ANDROID) |
120 DVLOG(1) << "Using mono audio output for Android"; | 124 DVLOG(1) << "Using mono audio output for Android"; |
121 channel_layout = media::CHANNEL_LAYOUT_MONO; | 125 channel_layout = media::CHANNEL_LAYOUT_MONO; |
122 #endif | 126 #endif |
123 // Ask the renderer for the default audio output hardware sample-rate. | 127 |
124 media::AudioHardwareConfig* hardware_config = | 128 int sample_rate = sample_rate_; |
Jói (2013/09/06 14:49:26): Maybe comment on why you don't use sample_rate_ be… | |
tommi (sloooow) - chröme (2013/09/06 16:56:54): I added a todo for me and Henrik to look into chan… | |
125 RenderThreadImpl::current()->GetAudioHardwareConfig(); | |
126 int sample_rate = hardware_config->GetOutputSampleRate(); | |
127 DVLOG(1) << "Audio output hardware sample rate: " << sample_rate; | 129 DVLOG(1) << "Audio output hardware sample rate: " << sample_rate; |
128 | 130 |
129 // WebRTC does not yet support higher rates than 96000 on the client side | 131 // WebRTC does not yet support higher rates than 96000 on the client side |
130 // and 48000 is the preferred sample rate. Therefore, if 192000 is detected, | 132 // and 48000 is the preferred sample rate. Therefore, if 192000 is detected, |
131 // we change the rate to 48000 instead. The consequence is that the native | 133 // we change the rate to 48000 instead. The consequence is that the native |
132 // layer will be opened up at 192kHz but WebRTC will provide data at 48kHz | 134 // layer will be opened up at 192kHz but WebRTC will provide data at 48kHz |
133 // which will then be resampled by the audio converter on the browser side | 135 // which will then be resampled by the audio converter on the browser side |
134 // to match the native audio layer. | 136 // to match the native audio layer. |
135 if (sample_rate == 192000) { | 137 if (sample_rate == 192000) { |
136 DVLOG(1) << "Resampling from 48000 to 192000 is required"; | 138 DVLOG(1) << "Resampling from 48000 to 192000 is required"; |
(...skipping 34 matching lines...) | |
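A standalone sketch of the rate clamp the comment above describes; the helper name is hypothetical and the logic is inferred from the comment, not copied from the elided lines:

```cpp
// WebRTC caps client-side rates at 96000 Hz and prefers 48000 Hz, so a
// detected 192000 Hz hardware rate is swapped for 48000 Hz before the sink
// parameters are built; the browser-side converter resamples back up to
// match the native layer. Hypothetical helper, for illustration only.
int ClampWebRtcSampleRate(int hardware_rate) {
  if (hardware_rate == 192000)
    return 48000;
  return hardware_rate;
}
```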
171 // We strive to open up using native parameters to achieve best possible | 173 // We strive to open up using native parameters to achieve best possible |
172 // performance and to ensure that no FIFO is needed on the browser side to | 174 // performance and to ensure that no FIFO is needed on the browser side to |
173 // match the client request. Any mismatch between the source and the sink is | 175 // match the client request. Any mismatch between the source and the sink is |
174 // taken care of in this class instead using a pull FIFO. | 176 // taken care of in this class instead using a pull FIFO. |
175 | 177 |
176 media::AudioParameters sink_params; | 178 media::AudioParameters sink_params; |
177 | 179 |
178 #if defined(OS_ANDROID) | 180 #if defined(OS_ANDROID) |
179 buffer_size = kDefaultOutputBufferSize; | 181 buffer_size = kDefaultOutputBufferSize; |
180 #else | 182 #else |
181 buffer_size = hardware_config->GetOutputBufferSize(); | 183 buffer_size = frames_per_buffer_; |
182 #endif | 184 #endif |
183 | 185 |
184 sink_params.Reset(media::AudioParameters::AUDIO_PCM_LOW_LATENCY, | 186 sink_params.Reset(media::AudioParameters::AUDIO_PCM_LOW_LATENCY, |
185 channel_layout, channels, 0, sample_rate, 16, buffer_size); | 187 channel_layout, channels, 0, sample_rate, 16, buffer_size); |
186 | 188 |
187 // Create a FIFO if re-buffering is required to match the source input with | 189 // Create a FIFO if re-buffering is required to match the source input with |
188 // the sink request. The source acts as provider here and the sink as | 190 // the sink request. The source acts as provider here and the sink as |
189 // consumer. | 191 // consumer. |
190 fifo_delay_milliseconds_ = 0; | 192 fifo_delay_milliseconds_ = 0; |
191 if (source_params.frames_per_buffer() != sink_params.frames_per_buffer()) { | 193 if (source_params.frames_per_buffer() != sink_params.frames_per_buffer()) { |
192 DVLOG(1) << "Rebuffering from " << source_params.frames_per_buffer() | 194 DVLOG(1) << "Rebuffering from " << source_params.frames_per_buffer() |
193 << " to " << sink_params.frames_per_buffer(); | 195 << " to " << sink_params.frames_per_buffer(); |
194 audio_fifo_.reset(new media::AudioPullFifo( | 196 audio_fifo_.reset(new media::AudioPullFifo( |
195 source_params.channels(), | 197 source_params.channels(), |
196 source_params.frames_per_buffer(), | 198 source_params.frames_per_buffer(), |
197 base::Bind( | 199 base::Bind( |
198 &WebRtcAudioRenderer::SourceCallback, | 200 &WebRtcAudioRenderer::SourceCallback, |
199 base::Unretained(this)))); | 201 base::Unretained(this)))); |
200 | 202 |
201 if (sink_params.frames_per_buffer() > source_params.frames_per_buffer()) { | 203 if (sink_params.frames_per_buffer() > source_params.frames_per_buffer()) { |
202 int frame_duration_milliseconds = base::Time::kMillisecondsPerSecond / | 204 int frame_duration_milliseconds = base::Time::kMillisecondsPerSecond / |
203 static_cast<double>(source_params.sample_rate()); | 205 static_cast<double>(source_params.sample_rate()); |
204 fifo_delay_milliseconds_ = (sink_params.frames_per_buffer() - | 206 fifo_delay_milliseconds_ = (sink_params.frames_per_buffer() - |
205 source_params.frames_per_buffer()) * frame_duration_milliseconds; | 207 source_params.frames_per_buffer()) * frame_duration_milliseconds; |
206 } | 208 } |
207 } | 209 } |
208 | 210 |
209 | |
210 // Allocate local audio buffers based on the parameters above. | 211 // Allocate local audio buffers based on the parameters above. |
211 // It is assumed that each audio sample contains 16 bits and each | 212 // It is assumed that each audio sample contains 16 bits and each |
212 // audio frame contains one or two audio samples depending on the | 213 // audio frame contains one or two audio samples depending on the |
213 // number of channels. | 214 // number of channels. |
214 buffer_.reset( | 215 buffer_.reset( |
215 new int16[source_params.frames_per_buffer() * source_params.channels()]); | 216 new int16[source_params.frames_per_buffer() * source_params.channels()]); |
216 | 217 |
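The scratch buffer above is sized in 16-bit samples: frames times channels elements, two bytes each. A minimal standalone analogue, with std::unique_ptr standing in for the CL's scoped array and the function name hypothetical:

```cpp
#include <cstdint>
#include <memory>

// e.g. 480 frames of stereo -> 960 int16 samples -> 1920 bytes.
std::unique_ptr<int16_t[]> AllocateInterleavedBuffer(int frames,
                                                     int channels) {
  return std::unique_ptr<int16_t[]>(
      new int16_t[static_cast<size_t>(frames) * channels]);
}
```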
217 source_ = source; | 218 source_ = source; |
218 source->SetRenderFormat(source_params); | 219 source->SetRenderFormat(source_params); |
219 | 220 |
220 // Configure the audio rendering client and start rendering. | 221 // Configure the audio rendering client and start rendering. |
221 sink_ = AudioDeviceFactory::NewOutputDevice(source_render_view_id_); | 222 sink_ = AudioDeviceFactory::NewOutputDevice(source_render_view_id_); |
222 sink_->Initialize(sink_params, this); | 223 |
224 if (session_id_ != -1) { | |
Jói (2013/09/06 14:49:26): You used 0 elsewhere, looks like you want -1 every… | |
tommi (sloooow) - chröme (2013/09/06 16:56:54): Removed the if and added a DCHECK_GE(session_id_,… | |
225 // TODO(tommi): Rename InitializeUnifiedStream to better reflect its | |
226 // association with a session. | |
227 sink_->InitializeUnifiedStream(sink_params, this, session_id_); | |
228 } else { | |
229 sink_->Initialize(sink_params, this); | |
230 } | |
231 | |
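tommi's reply above is truncated, but it describes replacing the if/else with a DCHECK. A hedged reconstruction of what the follow-up patch set likely contains; the lower bound of 0 is a guess based on Jói's remark that 0 was used elsewhere, not code from this CL:

```cpp
// Assumed follow-up shape: assert a valid session id up front and always
// take the unified-stream path. The DCHECK bound (0) is inferred from the
// truncated review exchange, not confirmed by this patch set.
DCHECK_GE(session_id_, 0);
sink_->InitializeUnifiedStream(sink_params, this, session_id_);
```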
223 sink_->Start(); | 232 sink_->Start(); |
224 | 233 |
225 // User must call Play() before any audio can be heard. | 234 // User must call Play() before any audio can be heard. |
226 state_ = PAUSED; | 235 state_ = PAUSED; |
227 | 236 |
228 UMA_HISTOGRAM_ENUMERATION("WebRTC.AudioOutputChannelLayout", | 237 UMA_HISTOGRAM_ENUMERATION("WebRTC.AudioOutputChannelLayout", |
229 source_params.channel_layout(), | 238 source_params.channel_layout(), |
230 media::CHANNEL_LAYOUT_MAX); | 239 media::CHANNEL_LAYOUT_MAX); |
231 UMA_HISTOGRAM_ENUMERATION("WebRTC.AudioOutputFramesPerBuffer", | 240 UMA_HISTOGRAM_ENUMERATION("WebRTC.AudioOutputFramesPerBuffer", |
232 source_params.frames_per_buffer(), | 241 source_params.frames_per_buffer(), |
(...skipping 117 matching lines...) | |
350 } | 359 } |
351 | 360 |
352 // De-interleave each channel and convert to 32-bit floating-point | 361 // De-interleave each channel and convert to 32-bit floating-point |
353 // with nominal range -1.0 -> +1.0 to match the callback format. | 362 // with nominal range -1.0 -> +1.0 to match the callback format. |
354 audio_bus->FromInterleaved(buffer_.get(), | 363 audio_bus->FromInterleaved(buffer_.get(), |
355 audio_bus->frames(), | 364 audio_bus->frames(), |
356 sizeof(buffer_[0])); | 365 sizeof(buffer_[0])); |
357 } | 366 } |
358 | 367 |
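media::AudioBus::FromInterleaved performs the conversion described in the comment above. A standalone sketch of the equivalent math, assuming the conventional int16-to-float scaling by 1/32768; this illustrates the idea and is not the media/ implementation:

```cpp
#include <cstdint>
#include <vector>

// De-interleave 16-bit PCM into per-channel float planes with nominal
// range [-1.0, 1.0).
void DeinterleaveToFloat(const int16_t* interleaved,
                         int frames,
                         int channels,
                         std::vector<std::vector<float>>* planes) {
  planes->assign(channels, std::vector<float>(frames));
  for (int f = 0; f < frames; ++f) {
    for (int ch = 0; ch < channels; ++ch)
      (*planes)[ch][f] = interleaved[f * channels + ch] / 32768.0f;
  }
}
```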
359 } // namespace content | 368 } // namespace content |