OLD | NEW |
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "content/renderer/media/webrtc_audio_device_impl.h" | 5 #include "content/renderer/media/webrtc_audio_device_impl.h" |
6 | 6 |
7 #include "base/bind.h" | 7 #include "base/bind.h" |
8 #include "base/metrics/histogram.h" | 8 #include "base/metrics/histogram.h" |
9 #include "base/string_util.h" | 9 #include "base/string_util.h" |
10 #include "base/win/windows_version.h" | 10 #include "base/win/windows_version.h" |
(...skipping 156 matching lines...) |
167 | 167 |
168 int32_t WebRtcAudioDeviceImpl::Release() { | 168 int32_t WebRtcAudioDeviceImpl::Release() { |
169 int ret = base::subtle::Barrier_AtomicIncrement(&ref_count_, -1); | 169 int ret = base::subtle::Barrier_AtomicIncrement(&ref_count_, -1); |
170 if (ret == 0) { | 170 if (ret == 0) { |
171 delete this; | 171 delete this; |
172 } | 172 } |
173 return ret; | 173 return ret; |
174 } | 174 } |
175 | 175 |
176 int WebRtcAudioDeviceImpl::Render( | 176 int WebRtcAudioDeviceImpl::Render( |
177 const std::vector<float*>& audio_data, | 177 media::AudioBus* audio_bus, |
178 int number_of_frames, | |
179 int audio_delay_milliseconds) { | 178 int audio_delay_milliseconds) { |
180 DCHECK_LE(number_of_frames, output_buffer_size()); | 179 DCHECK_LE(audio_bus->frames(), output_buffer_size()); |
181 | 180 |
182 { | 181 { |
183 base::AutoLock auto_lock(lock_); | 182 base::AutoLock auto_lock(lock_); |
184 // Store the reported audio delay locally. | 183 // Store the reported audio delay locally. |
185 output_delay_ms_ = audio_delay_milliseconds; | 184 output_delay_ms_ = audio_delay_milliseconds; |
186 } | 185 } |
187 | 186 |
188 const int channels = audio_data.size(); | 187 const int channels = audio_bus->channels(); |
189 DCHECK_LE(channels, output_channels()); | 188 DCHECK_LE(channels, output_channels()); |
190 | 189 |
191 int samples_per_sec = output_sample_rate(); | 190 int samples_per_sec = output_sample_rate(); |
192 if (samples_per_sec == 44100) { | 191 if (samples_per_sec == 44100) { |
193 // Even if the hardware runs at 44.1kHz, we use 44.0 internally. | 192 // Even if the hardware runs at 44.1kHz, we use 44.0 internally. |
194 samples_per_sec = 44000; | 193 samples_per_sec = 44000; |
195 } | 194 } |
196 int samples_per_10_msec = (samples_per_sec / 100); | 195 int samples_per_10_msec = (samples_per_sec / 100); |
197 const int bytes_per_10_msec = | 196 const int bytes_per_10_msec = |
198 channels * samples_per_10_msec * bytes_per_sample_; | 197 channels * samples_per_10_msec * bytes_per_sample_; |
199 | 198 |
200 uint32_t num_audio_samples = 0; | 199 uint32_t num_audio_samples = 0; |
201 int accumulated_audio_samples = 0; | 200 int accumulated_audio_samples = 0; |
202 | 201 |
203 char* audio_byte_buffer = reinterpret_cast<char*>(output_buffer_.get()); | 202 char* audio_byte_buffer = reinterpret_cast<char*>(output_buffer_.get()); |
204 | 203 |
205 // Get audio samples in blocks of 10 milliseconds from the registered | 204 // Get audio samples in blocks of 10 milliseconds from the registered |
206 // webrtc::AudioTransport source. Keep reading until our internal buffer | 205 // webrtc::AudioTransport source. Keep reading until our internal buffer |
207 // is full. | 206 // is full. |
208 while (accumulated_audio_samples < number_of_frames) { | 207 while (accumulated_audio_samples < audio_bus->frames()) { |
209 // Get 10ms and append output to temporary byte buffer. | 208 // Get 10ms and append output to temporary byte buffer. |
210 audio_transport_callback_->NeedMorePlayData(samples_per_10_msec, | 209 audio_transport_callback_->NeedMorePlayData(samples_per_10_msec, |
211 bytes_per_sample_, | 210 bytes_per_sample_, |
212 channels, | 211 channels, |
213 samples_per_sec, | 212 samples_per_sec, |
214 audio_byte_buffer, | 213 audio_byte_buffer, |
215 num_audio_samples); | 214 num_audio_samples); |
216 accumulated_audio_samples += num_audio_samples; | 215 accumulated_audio_samples += num_audio_samples; |
217 audio_byte_buffer += bytes_per_10_msec; | 216 audio_byte_buffer += bytes_per_10_msec; |
218 } | 217 } |
219 | 218 |
220 // Deinterleave each channel and convert to 32-bit floating-point | 219 // Deinterleave each channel and convert to 32-bit floating-point |
221 // with nominal range -1.0 -> +1.0 to match the callback format. | 220 // with nominal range -1.0 -> +1.0 to match the callback format. |
222 for (int channel_index = 0; channel_index < channels; ++channel_index) { | 221 for (int channel_index = 0; channel_index < channels; ++channel_index) { |
223 media::DeinterleaveAudioChannel( | 222 media::DeinterleaveAudioChannel( |
224 output_buffer_.get(), | 223 output_buffer_.get(), |
225 audio_data[channel_index], | 224 audio_bus->channel(channel_index), |
226 channels, | 225 channels, |
227 channel_index, | 226 channel_index, |
228 bytes_per_sample_, | 227 bytes_per_sample_, |
229 number_of_frames); | 228 audio_bus->frames()); |
230 } | 229 } |
231 return number_of_frames; | 230 return audio_bus->frames(); |
232 } | 231 } |
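Note on the AudioBus migration in Render() above: the old interface passed a std::vector<float*> plus an explicit frame count, while the new one reads the channel count, frame count, and per-channel destinations directly from media::AudioBus (audio_bus->channels(), audio_bus->frames(), audio_bus->channel(i)). The sketch below is a minimal, hypothetical illustration of what deinterleaving one channel of interleaved 16-bit PCM into a float plane with nominal range -1.0 -> +1.0 looks like; it is not the actual media::DeinterleaveAudioChannel implementation, and the helper name and scale factor are assumptions.

// Hypothetical helper for illustration only; the real conversion is
// media::DeinterleaveAudioChannel in media/.
#include <cstdint>

static void DeinterleaveInt16ChannelToFloat(const int16_t* interleaved,
                                            float* destination,
                                            int channels,
                                            int channel_index,
                                            int frames) {
  // Assume 16-bit samples; map the int16 range onto [-1.0, +1.0).
  const float kScale = 1.0f / 32768.0f;
  for (int frame = 0; frame < frames; ++frame)
    destination[frame] = interleaved[frame * channels + channel_index] * kScale;
}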
233 | 232 |
234 void WebRtcAudioDeviceImpl::OnRenderError() { | 233 void WebRtcAudioDeviceImpl::OnRenderError() { |
235 DCHECK_EQ(MessageLoop::current(), ChildProcess::current()->io_message_loop()); | 234 DCHECK_EQ(MessageLoop::current(), ChildProcess::current()->io_message_loop()); |
236 // TODO(henrika): Implement error handling. | 235 // TODO(henrika): Implement error handling. |
237 LOG(ERROR) << "OnRenderError()"; | 236 LOG(ERROR) << "OnRenderError()"; |
238 } | 237 } |
239 | 238 |
240 void WebRtcAudioDeviceImpl::Capture(const std::vector<float*>& audio_data, | 239 void WebRtcAudioDeviceImpl::Capture(media::AudioBus* audio_bus, |
241 int number_of_frames, | |
242 int audio_delay_milliseconds, | 240 int audio_delay_milliseconds, |
243 double volume) { | 241 double volume) { |
244 DCHECK_LE(number_of_frames, input_buffer_size()); | 242 DCHECK_LE(audio_bus->frames(), input_buffer_size()); |
245 #if defined(OS_WIN) || defined(OS_MACOSX) | 243 #if defined(OS_WIN) || defined(OS_MACOSX) |
246 DCHECK_LE(volume, 1.0); | 244 DCHECK_LE(volume, 1.0); |
247 #elif defined(OS_LINUX) || defined(OS_OPENBSD) | 245 #elif defined(OS_LINUX) || defined(OS_OPENBSD) |
248 // We have a special situation on Linux where the microphone volume can be | 246 // We have a special situation on Linux where the microphone volume can be |
249 // "higher than maximum". The input volume slider in the sound preference | 247 // "higher than maximum". The input volume slider in the sound preference |
250 // allows the user to set a scaling that is higher than 100%. It means that | 248 // allows the user to set a scaling that is higher than 100%. It means that |
251 // even if the reported maximum level is N, the actual microphone level can | 249 // even if the reported maximum level is N, the actual microphone level can |
252 // go up to 1.5*N and that corresponds to a normalized |volume| of 1.5. | 250 // go up to 1.5*N and that corresponds to a normalized |volume| of 1.5. |
253 DCHECK_LE(volume, 1.5); | 251 DCHECK_LE(volume, 1.5); |
254 #endif | 252 #endif |
255 | 253 |
256 int output_delay_ms = 0; | 254 int output_delay_ms = 0; |
257 { | 255 { |
258 base::AutoLock auto_lock(lock_); | 256 base::AutoLock auto_lock(lock_); |
259 // Store the reported audio delay locally. | 257 // Store the reported audio delay locally. |
260 input_delay_ms_ = audio_delay_milliseconds; | 258 input_delay_ms_ = audio_delay_milliseconds; |
261 output_delay_ms = output_delay_ms_; | 259 output_delay_ms = output_delay_ms_; |
262 } | 260 } |
263 | 261 |
264 const int channels = audio_data.size(); | 262 const int channels = audio_bus->channels(); |
265 DCHECK_LE(channels, input_channels()); | 263 DCHECK_LE(channels, input_channels()); |
266 uint32_t new_mic_level = 0; | 264 uint32_t new_mic_level = 0; |
267 | 265 |
268 // Interleave, scale, and clip input to int and store result in | 266 // Interleave, scale, and clip input to int and store result in |
269 // a local byte buffer. | 267 // a local byte buffer. |
270 media::InterleaveFloatToInt(audio_data, | 268 media::InterleaveFloatToInt(audio_bus, |
271 input_buffer_.get(), | 269 input_buffer_.get(), |
272 number_of_frames, | 270 audio_bus->frames(), |
273 input_audio_parameters_.bits_per_sample() / 8); | 271 input_audio_parameters_.bits_per_sample() / 8); |
274 | 272 |
275 int samples_per_sec = input_sample_rate(); | 273 int samples_per_sec = input_sample_rate(); |
276 if (samples_per_sec == 44100) { | 274 if (samples_per_sec == 44100) { |
277 // Even if the hardware runs at 44.1kHz, we use 44.0 internally. | 275 // Even if the hardware runs at 44.1kHz, we use 44.0 internally. |
278 samples_per_sec = 44000; | 276 samples_per_sec = 44000; |
279 } | 277 } |
280 const int samples_per_10_msec = (samples_per_sec / 100); | 278 const int samples_per_10_msec = (samples_per_sec / 100); |
281 const int bytes_per_10_msec = | 279 const int bytes_per_10_msec = |
282 channels * samples_per_10_msec * bytes_per_sample_; | 280 channels * samples_per_10_msec * bytes_per_sample_; |
283 int accumulated_audio_samples = 0; | 281 int accumulated_audio_samples = 0; |
284 | 282 |
285 char* audio_byte_buffer = reinterpret_cast<char*>(input_buffer_.get()); | 283 char* audio_byte_buffer = reinterpret_cast<char*>(input_buffer_.get()); |
286 | 284 |
287 // Map internal volume range of [0.0, 1.0] into [0, 255] used by the | 285 // Map internal volume range of [0.0, 1.0] into [0, 255] used by the |
288 // webrtc::VoiceEngine. | 286 // webrtc::VoiceEngine. |
289 uint32_t current_mic_level = static_cast<uint32_t>(volume * kMaxVolumeLevel); | 287 uint32_t current_mic_level = static_cast<uint32_t>(volume * kMaxVolumeLevel); |
290 | 288 |
291 // Write audio samples in blocks of 10 milliseconds to the registered | 289 // Write audio samples in blocks of 10 milliseconds to the registered |
292 // webrtc::AudioTransport sink. Keep writing until our internal byte | 290 // webrtc::AudioTransport sink. Keep writing until our internal byte |
293 // buffer is empty. | 291 // buffer is empty. |
294 while (accumulated_audio_samples < number_of_frames) { | 292 while (accumulated_audio_samples < audio_bus->frames()) { |
295 // Deliver 10ms of recorded 16-bit linear PCM audio. | 293 // Deliver 10ms of recorded 16-bit linear PCM audio. |
296 audio_transport_callback_->RecordedDataIsAvailable( | 294 audio_transport_callback_->RecordedDataIsAvailable( |
297 audio_byte_buffer, | 295 audio_byte_buffer, |
298 samples_per_10_msec, | 296 samples_per_10_msec, |
299 bytes_per_sample_, | 297 bytes_per_sample_, |
300 channels, | 298 channels, |
301 samples_per_sec, | 299 samples_per_sec, |
302 input_delay_ms_ + output_delay_ms, | 300 input_delay_ms_ + output_delay_ms, |
303 0, // TODO(henrika): |clock_drift| parameter is not utilized today. | 301 0, // TODO(henrika): |clock_drift| parameter is not utilized today. |
304 current_mic_level, | 302 current_mic_level, |
(...skipping 863 matching lines...) |
1168 } | 1166 } |
1169 | 1167 |
1170 int32_t WebRtcAudioDeviceImpl::GetLoudspeakerStatus(bool* enabled) const { | 1168 int32_t WebRtcAudioDeviceImpl::GetLoudspeakerStatus(bool* enabled) const { |
1171 NOTIMPLEMENTED(); | 1169 NOTIMPLEMENTED(); |
1172 return -1; | 1170 return -1; |
1173 } | 1171 } |
1174 | 1172 |
1175 void WebRtcAudioDeviceImpl::SetSessionId(int session_id) { | 1173 void WebRtcAudioDeviceImpl::SetSessionId(int session_id) { |
1176 session_id_ = session_id; | 1174 session_id_ = session_id; |
1177 } | 1175 } |
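Capture() earlier in this change performs the inverse of Render(): the float planes in the AudioBus are interleaved, scaled, and clipped to 16-bit PCM before being delivered to webrtc::AudioTransport in 10 ms blocks, and the normalized |volume| is mapped onto the [0, 255] range expected by webrtc::VoiceEngine. The sketch below assumes a simple planar float layout; the helper names are hypothetical and this is not the actual media::InterleaveFloatToInt implementation.

// Hypothetical helpers for illustration only.
#include <algorithm>
#include <cstdint>

static int16_t FloatToInt16WithClip(float sample) {
  // Scale to the int16 range and clip out-of-range samples (Linux input
  // volume can exceed 1.0, as noted in Capture()).
  const float scaled = sample * 32767.0f;
  return static_cast<int16_t>(std::max(-32768.0f, std::min(32767.0f, scaled)));
}

static void InterleaveFloatToInt16(const float* const* planes,
                                   int16_t* interleaved,
                                   int channels,
                                   int frames) {
  for (int frame = 0; frame < frames; ++frame) {
    for (int ch = 0; ch < channels; ++ch)
      interleaved[frame * channels + ch] = FloatToInt16WithClip(planes[ch][frame]);
  }
}

// Volume mapping as in Capture(): with kMaxVolumeLevel of 255, a normalized
// volume of 1.0 becomes mic level 255.
// uint32_t mic_level = static_cast<uint32_t>(volume * kMaxVolumeLevel);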