OLD | NEW |
1 // Copyright 2013 The Chromium Authors. All rights reserved. | 1 // Copyright 2013 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include <vector> | 5 #include <vector> |
6 | 6 |
7 #include "base/files/file_path.h" | 7 #include "base/files/file_path.h" |
8 #include "base/files/file_util.h" | 8 #include "base/files/file_util.h" |
9 #include "base/logging.h" | 9 #include "base/logging.h" |
10 #include "base/memory/aligned_memory.h" | 10 #include "base/memory/aligned_memory.h" |
(...skipping 115 matching lines...) |
126 | 126 |
127 media::AudioBus* processed_data = nullptr; | 127 media::AudioBus* processed_data = nullptr; |
128 base::TimeDelta capture_delay; | 128 base::TimeDelta capture_delay; |
129 int new_volume = 0; | 129 int new_volume = 0; |
130 while (audio_processor->ProcessAndConsumeData( | 130 while (audio_processor->ProcessAndConsumeData( |
131 255, false, &processed_data, &capture_delay, &new_volume)) { | 131 255, false, &processed_data, &capture_delay, &new_volume)) { |
132 EXPECT_TRUE(processed_data); | 132 EXPECT_TRUE(processed_data); |
133 EXPECT_NEAR(input_capture_delay.InMillisecondsF(), | 133 EXPECT_NEAR(input_capture_delay.InMillisecondsF(), |
134 capture_delay.InMillisecondsF(), | 134 capture_delay.InMillisecondsF(), |
135 output_buffer_duration.InMillisecondsF()); | 135 output_buffer_duration.InMillisecondsF()); |
136 EXPECT_EQ(audio_processor->OutputFormat().sample_rate(), | 136 EXPECT_EQ(expected_output_sample_rate, |
137 expected_output_sample_rate); | 137 audio_processor->OutputFormat().sample_rate()); |
138 EXPECT_EQ(audio_processor->OutputFormat().channels(), | 138 EXPECT_EQ(expected_output_channels, |
139 expected_output_channels); | 139 audio_processor->OutputFormat().channels()); |
140 EXPECT_EQ(audio_processor->OutputFormat().frames_per_buffer(), | 140 EXPECT_EQ(expected_output_buffer_size, |
141 expected_output_buffer_size); | 141 audio_processor->OutputFormat().frames_per_buffer()); |
142 } | 142 } |
143 | 143 |
144 data_ptr += params.frames_per_buffer() * params.channels(); | 144 data_ptr += params.frames_per_buffer() * params.channels(); |
145 } | 145 } |
146 } | 146 } |
147 | 147 |
148 void VerifyDefaultComponents(MediaStreamAudioProcessor* audio_processor) { | 148 void VerifyDefaultComponents(MediaStreamAudioProcessor* audio_processor) { |
149 webrtc::AudioProcessing* audio_processing = | 149 webrtc::AudioProcessing* audio_processing = |
150 audio_processor->audio_processing_.get(); | 150 audio_processor->audio_processing_.get(); |
151 #if defined(OS_ANDROID) | 151 #if defined(OS_ANDROID) |
(...skipping 22 matching lines...) |
174 #else | 174 #else |
175 EXPECT_TRUE(audio_processing->gain_control()->mode() == | 175 EXPECT_TRUE(audio_processing->gain_control()->mode() == |
176 webrtc::GainControl::kAdaptiveAnalog); | 176 webrtc::GainControl::kAdaptiveAnalog); |
177 EXPECT_TRUE(audio_processing->voice_detection()->is_enabled()); | 177 EXPECT_TRUE(audio_processing->voice_detection()->is_enabled()); |
178 EXPECT_TRUE(audio_processing->voice_detection()->likelihood() == | 178 EXPECT_TRUE(audio_processing->voice_detection()->likelihood() == |
179 webrtc::VoiceDetection::kVeryLowLikelihood); | 179 webrtc::VoiceDetection::kVeryLowLikelihood); |
180 #endif | 180 #endif |
181 } | 181 } |
182 | 182 |
183 media::AudioParameters params_; | 183 media::AudioParameters params_; |
| 184 MediaStreamDevice::AudioDeviceParameters input_device_params_; |
184 }; | 185 }; |
185 | 186 |
186 // Test crashing with ASAN on Android. crbug.com/468762 | 187 // Test crashing with ASAN on Android. crbug.com/468762 |
187 #if defined(OS_ANDROID) && defined(ADDRESS_SANITIZER) | 188 #if defined(OS_ANDROID) && defined(ADDRESS_SANITIZER) |
188 #define MAYBE_WithAudioProcessing DISABLED_WithAudioProcessing | 189 #define MAYBE_WithAudioProcessing DISABLED_WithAudioProcessing |
189 #else | 190 #else |
190 #define MAYBE_WithAudioProcessing WithAudioProcessing | 191 #define MAYBE_WithAudioProcessing WithAudioProcessing |
191 #endif | 192 #endif |
192 TEST_F(MediaStreamAudioProcessorTest, MAYBE_WithAudioProcessing) { | 193 TEST_F(MediaStreamAudioProcessorTest, MAYBE_WithAudioProcessing) { |
193 MockMediaConstraintFactory constraint_factory; | 194 MockMediaConstraintFactory constraint_factory; |
194 scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device( | 195 scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device( |
195 new WebRtcAudioDeviceImpl()); | 196 new WebRtcAudioDeviceImpl()); |
196 scoped_refptr<MediaStreamAudioProcessor> audio_processor( | 197 scoped_refptr<MediaStreamAudioProcessor> audio_processor( |
197 new rtc::RefCountedObject<MediaStreamAudioProcessor>( | 198 new rtc::RefCountedObject<MediaStreamAudioProcessor>( |
198 constraint_factory.CreateWebMediaConstraints(), 0, | 199 constraint_factory.CreateWebMediaConstraints(), input_device_params_, |
199 webrtc_audio_device.get())); | 200 webrtc_audio_device.get())); |
200 EXPECT_TRUE(audio_processor->has_audio_processing()); | 201 EXPECT_TRUE(audio_processor->has_audio_processing()); |
201 audio_processor->OnCaptureFormatChanged(params_); | 202 audio_processor->OnCaptureFormatChanged(params_); |
202 VerifyDefaultComponents(audio_processor.get()); | 203 VerifyDefaultComponents(audio_processor.get()); |
203 | 204 |
204 ProcessDataAndVerifyFormat(audio_processor.get(), | 205 ProcessDataAndVerifyFormat(audio_processor.get(), |
205 kAudioProcessingSampleRate, | 206 kAudioProcessingSampleRate, |
206 kAudioProcessingNumberOfChannel, | 207 kAudioProcessingNumberOfChannel, |
207 kAudioProcessingSampleRate / 100); | 208 kAudioProcessingSampleRate / 100); |
208 // Set |audio_processor| to NULL to make sure |webrtc_audio_device| outlives | 209 // Set |audio_processor| to NULL to make sure |webrtc_audio_device| outlives |
209 // |audio_processor|. | 210 // |audio_processor|. |
210 audio_processor = NULL; | 211 audio_processor = NULL; |
211 } | 212 } |
212 | 213 |
213 TEST_F(MediaStreamAudioProcessorTest, VerifyTabCaptureWithoutAudioProcessing) { | 214 TEST_F(MediaStreamAudioProcessorTest, VerifyTabCaptureWithoutAudioProcessing) { |
214 scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device( | 215 scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device( |
215 new WebRtcAudioDeviceImpl()); | 216 new WebRtcAudioDeviceImpl()); |
216 // Create MediaStreamAudioProcessor instance for kMediaStreamSourceTab source. | 217 // Create MediaStreamAudioProcessor instance for kMediaStreamSourceTab source. |
217 MockMediaConstraintFactory tab_constraint_factory; | 218 MockMediaConstraintFactory tab_constraint_factory; |
218 const std::string tab_string = kMediaStreamSourceTab; | 219 const std::string tab_string = kMediaStreamSourceTab; |
219 tab_constraint_factory.AddMandatory(kMediaStreamSource, | 220 tab_constraint_factory.AddMandatory(kMediaStreamSource, |
220 tab_string); | 221 tab_string); |
221 scoped_refptr<MediaStreamAudioProcessor> audio_processor( | 222 scoped_refptr<MediaStreamAudioProcessor> audio_processor( |
222 new rtc::RefCountedObject<MediaStreamAudioProcessor>( | 223 new rtc::RefCountedObject<MediaStreamAudioProcessor>( |
223 tab_constraint_factory.CreateWebMediaConstraints(), 0, | 224 tab_constraint_factory.CreateWebMediaConstraints(), |
224 webrtc_audio_device.get())); | 225 input_device_params_, webrtc_audio_device.get())); |
225 EXPECT_FALSE(audio_processor->has_audio_processing()); | 226 EXPECT_FALSE(audio_processor->has_audio_processing()); |
226 audio_processor->OnCaptureFormatChanged(params_); | 227 audio_processor->OnCaptureFormatChanged(params_); |
227 | 228 |
228 ProcessDataAndVerifyFormat(audio_processor.get(), | 229 ProcessDataAndVerifyFormat(audio_processor.get(), |
229 params_.sample_rate(), | 230 params_.sample_rate(), |
230 params_.channels(), | 231 params_.channels(), |
231 params_.sample_rate() / 100); | 232 params_.sample_rate() / 100); |
232 | 233 |
233 // Create MediaStreamAudioProcessor instance for kMediaStreamSourceSystem | 234 // Create MediaStreamAudioProcessor instance for kMediaStreamSourceSystem |
234 // source. | 235 // source. |
235 MockMediaConstraintFactory system_constraint_factory; | 236 MockMediaConstraintFactory system_constraint_factory; |
236 const std::string system_string = kMediaStreamSourceSystem; | 237 const std::string system_string = kMediaStreamSourceSystem; |
237 system_constraint_factory.AddMandatory(kMediaStreamSource, | 238 system_constraint_factory.AddMandatory(kMediaStreamSource, |
238 system_string); | 239 system_string); |
239 audio_processor = new rtc::RefCountedObject<MediaStreamAudioProcessor>( | 240 audio_processor = new rtc::RefCountedObject<MediaStreamAudioProcessor>( |
240 system_constraint_factory.CreateWebMediaConstraints(), 0, | 241 system_constraint_factory.CreateWebMediaConstraints(), |
241 webrtc_audio_device.get()); | 242 input_device_params_, webrtc_audio_device.get()); |
242 EXPECT_FALSE(audio_processor->has_audio_processing()); | 243 EXPECT_FALSE(audio_processor->has_audio_processing()); |
243 | 244 |
244 // Set |audio_processor| to NULL to make sure |webrtc_audio_device| outlives | 245 // Set |audio_processor| to NULL to make sure |webrtc_audio_device| outlives |
245 // |audio_processor|. | 246 // |audio_processor|. |
246 audio_processor = NULL; | 247 audio_processor = NULL; |
247 } | 248 } |
248 | 249 |
249 TEST_F(MediaStreamAudioProcessorTest, TurnOffDefaultConstraints) { | 250 TEST_F(MediaStreamAudioProcessorTest, TurnOffDefaultConstraints) { |
250 // Turn off the default constraints and pass it to MediaStreamAudioProcessor. | 251 // Turn off the default constraints and pass it to MediaStreamAudioProcessor. |
251 MockMediaConstraintFactory constraint_factory; | 252 MockMediaConstraintFactory constraint_factory; |
252 constraint_factory.DisableDefaultAudioConstraints(); | 253 constraint_factory.DisableDefaultAudioConstraints(); |
253 scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device( | 254 scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device( |
254 new WebRtcAudioDeviceImpl()); | 255 new WebRtcAudioDeviceImpl()); |
255 scoped_refptr<MediaStreamAudioProcessor> audio_processor( | 256 scoped_refptr<MediaStreamAudioProcessor> audio_processor( |
256 new rtc::RefCountedObject<MediaStreamAudioProcessor>( | 257 new rtc::RefCountedObject<MediaStreamAudioProcessor>( |
257 constraint_factory.CreateWebMediaConstraints(), 0, | 258 constraint_factory.CreateWebMediaConstraints(), input_device_params_, |
258 webrtc_audio_device.get())); | 259 webrtc_audio_device.get())); |
259 EXPECT_FALSE(audio_processor->has_audio_processing()); | 260 EXPECT_FALSE(audio_processor->has_audio_processing()); |
260 audio_processor->OnCaptureFormatChanged(params_); | 261 audio_processor->OnCaptureFormatChanged(params_); |
261 | 262 |
262 ProcessDataAndVerifyFormat(audio_processor.get(), | 263 ProcessDataAndVerifyFormat(audio_processor.get(), |
263 params_.sample_rate(), | 264 params_.sample_rate(), |
264 params_.channels(), | 265 params_.channels(), |
265 params_.sample_rate() / 100); | 266 params_.sample_rate() / 100); |
266 // Set |audio_processor| to NULL to make sure |webrtc_audio_device| outlives | 267 // Set |audio_processor| to NULL to make sure |webrtc_audio_device| outlives |
267 // |audio_processor|. | 268 // |audio_processor|. |
(...skipping 109 matching lines...) |
377 #define MAYBE_TestAllSampleRates DISABLED_TestAllSampleRates | 378 #define MAYBE_TestAllSampleRates DISABLED_TestAllSampleRates |
378 #else | 379 #else |
379 #define MAYBE_TestAllSampleRates TestAllSampleRates | 380 #define MAYBE_TestAllSampleRates TestAllSampleRates |
380 #endif | 381 #endif |
381 TEST_F(MediaStreamAudioProcessorTest, MAYBE_TestAllSampleRates) { | 382 TEST_F(MediaStreamAudioProcessorTest, MAYBE_TestAllSampleRates) { |
382 MockMediaConstraintFactory constraint_factory; | 383 MockMediaConstraintFactory constraint_factory; |
383 scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device( | 384 scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device( |
384 new WebRtcAudioDeviceImpl()); | 385 new WebRtcAudioDeviceImpl()); |
385 scoped_refptr<MediaStreamAudioProcessor> audio_processor( | 386 scoped_refptr<MediaStreamAudioProcessor> audio_processor( |
386 new rtc::RefCountedObject<MediaStreamAudioProcessor>( | 387 new rtc::RefCountedObject<MediaStreamAudioProcessor>( |
387 constraint_factory.CreateWebMediaConstraints(), 0, | 388 constraint_factory.CreateWebMediaConstraints(), input_device_params_, |
388 webrtc_audio_device.get())); | 389 webrtc_audio_device.get())); |
389 EXPECT_TRUE(audio_processor->has_audio_processing()); | 390 EXPECT_TRUE(audio_processor->has_audio_processing()); |
390 | 391 |
391 static const int kSupportedSampleRates[] = | 392 static const int kSupportedSampleRates[] = |
392 { 8000, 16000, 22050, 32000, 44100, 48000, 88200, 96000 }; | 393 { 8000, 16000, 22050, 32000, 44100, 48000, 88200, 96000 }; |
393 for (size_t i = 0; i < arraysize(kSupportedSampleRates); ++i) { | 394 for (size_t i = 0; i < arraysize(kSupportedSampleRates); ++i) { |
394 int buffer_size = (kSupportedSampleRates[i] / 100) < 128 ? | 395 int buffer_size = (kSupportedSampleRates[i] / 100) < 128 ? |
395 kSupportedSampleRates[i] / 100 : 128; | 396 kSupportedSampleRates[i] / 100 : 128; |
396 media::AudioParameters params( | 397 media::AudioParameters params( |
397 media::AudioParameters::AUDIO_PCM_LOW_LATENCY, | 398 media::AudioParameters::AUDIO_PCM_LOW_LATENCY, |
(...skipping 20 matching lines...) |
418 base::MessageLoopForUI message_loop; | 419 base::MessageLoopForUI message_loop; |
419 scoped_refptr<AecDumpMessageFilter> aec_dump_message_filter_( | 420 scoped_refptr<AecDumpMessageFilter> aec_dump_message_filter_( |
420 new AecDumpMessageFilter(message_loop.task_runner(), | 421 new AecDumpMessageFilter(message_loop.task_runner(), |
421 message_loop.task_runner())); | 422 message_loop.task_runner())); |
422 | 423 |
423 MockMediaConstraintFactory constraint_factory; | 424 MockMediaConstraintFactory constraint_factory; |
424 scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device( | 425 scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device( |
425 new WebRtcAudioDeviceImpl()); | 426 new WebRtcAudioDeviceImpl()); |
426 scoped_refptr<MediaStreamAudioProcessor> audio_processor( | 427 scoped_refptr<MediaStreamAudioProcessor> audio_processor( |
427 new rtc::RefCountedObject<MediaStreamAudioProcessor>( | 428 new rtc::RefCountedObject<MediaStreamAudioProcessor>( |
428 constraint_factory.CreateWebMediaConstraints(), 0, | 429 constraint_factory.CreateWebMediaConstraints(), input_device_params_, |
429 webrtc_audio_device.get())); | 430 webrtc_audio_device.get())); |
430 | 431 |
431 EXPECT_TRUE(audio_processor->aec_dump_message_filter_.get()); | 432 EXPECT_TRUE(audio_processor->aec_dump_message_filter_.get()); |
432 | 433 |
433 audio_processor = NULL; | 434 audio_processor = NULL; |
434 } | 435 } |
435 | 436 |
436 TEST_F(MediaStreamAudioProcessorTest, TestStereoAudio) { | 437 TEST_F(MediaStreamAudioProcessorTest, TestStereoAudio) { |
437 // Set up the correct constraints to turn off the audio processing and turn | 438 // Set up the correct constraints to turn off the audio processing and turn |
438 // on the stereo channels mirroring. | 439 // on the stereo channels mirroring. |
439 MockMediaConstraintFactory constraint_factory; | 440 MockMediaConstraintFactory constraint_factory; |
440 constraint_factory.AddMandatory(MediaAudioConstraints::kEchoCancellation, | 441 constraint_factory.AddMandatory(MediaAudioConstraints::kEchoCancellation, |
441 false); | 442 false); |
442 constraint_factory.AddMandatory(MediaAudioConstraints::kGoogAudioMirroring, | 443 constraint_factory.AddMandatory(MediaAudioConstraints::kGoogAudioMirroring, |
443 true); | 444 true); |
444 scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device( | 445 scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device( |
445 new WebRtcAudioDeviceImpl()); | 446 new WebRtcAudioDeviceImpl()); |
446 scoped_refptr<MediaStreamAudioProcessor> audio_processor( | 447 scoped_refptr<MediaStreamAudioProcessor> audio_processor( |
447 new rtc::RefCountedObject<MediaStreamAudioProcessor>( | 448 new rtc::RefCountedObject<MediaStreamAudioProcessor>( |
448 constraint_factory.CreateWebMediaConstraints(), 0, | 449 constraint_factory.CreateWebMediaConstraints(), input_device_params_, |
449 webrtc_audio_device.get())); | 450 webrtc_audio_device.get())); |
450 EXPECT_FALSE(audio_processor->has_audio_processing()); | 451 EXPECT_FALSE(audio_processor->has_audio_processing()); |
451 const media::AudioParameters source_params( | 452 const media::AudioParameters source_params( |
452 media::AudioParameters::AUDIO_PCM_LOW_LATENCY, | 453 media::AudioParameters::AUDIO_PCM_LOW_LATENCY, |
453 media::CHANNEL_LAYOUT_STEREO, 48000, 16, 480); | 454 media::CHANNEL_LAYOUT_STEREO, 48000, 16, 480); |
454 audio_processor->OnCaptureFormatChanged(source_params); | 455 audio_processor->OnCaptureFormatChanged(source_params); |
455 EXPECT_EQ(audio_processor->OutputFormat().channels(), 2); | 456 EXPECT_EQ(audio_processor->OutputFormat().channels(), 2); |
456 | 457 |
457 // Construct left and right channels, and assign different values to the | 458 // Construct left and right channels, and assign different values to the |
458 // first data of the left channel and right channel. | 459 // first data of the left channel and right channel. |
(...skipping 43 matching lines...) |
502 #endif | 503 #endif |
503 | 504 |
504 TEST_F(MediaStreamAudioProcessorTest, MAYBE_TestWithKeyboardMicChannel) { | 505 TEST_F(MediaStreamAudioProcessorTest, MAYBE_TestWithKeyboardMicChannel) { |
505 MockMediaConstraintFactory constraint_factory; | 506 MockMediaConstraintFactory constraint_factory; |
506 constraint_factory.AddMandatory( | 507 constraint_factory.AddMandatory( |
507 MediaAudioConstraints::kGoogExperimentalNoiseSuppression, true); | 508 MediaAudioConstraints::kGoogExperimentalNoiseSuppression, true); |
508 scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device( | 509 scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device( |
509 new WebRtcAudioDeviceImpl()); | 510 new WebRtcAudioDeviceImpl()); |
510 scoped_refptr<MediaStreamAudioProcessor> audio_processor( | 511 scoped_refptr<MediaStreamAudioProcessor> audio_processor( |
511 new rtc::RefCountedObject<MediaStreamAudioProcessor>( | 512 new rtc::RefCountedObject<MediaStreamAudioProcessor>( |
512 constraint_factory.CreateWebMediaConstraints(), 0, | 513 constraint_factory.CreateWebMediaConstraints(), input_device_params_, |
513 webrtc_audio_device.get())); | 514 webrtc_audio_device.get())); |
514 EXPECT_TRUE(audio_processor->has_audio_processing()); | 515 EXPECT_TRUE(audio_processor->has_audio_processing()); |
515 | 516 |
516 media::AudioParameters params(media::AudioParameters::AUDIO_PCM_LOW_LATENCY, | 517 media::AudioParameters params(media::AudioParameters::AUDIO_PCM_LOW_LATENCY, |
517 media::CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC, | 518 media::CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC, |
518 48000, 16, 512); | 519 48000, 16, 512); |
519 audio_processor->OnCaptureFormatChanged(params); | 520 audio_processor->OnCaptureFormatChanged(params); |
520 | 521 |
521 ProcessDataAndVerifyFormat(audio_processor.get(), | 522 ProcessDataAndVerifyFormat(audio_processor.get(), |
522 kAudioProcessingSampleRate, | 523 kAudioProcessingSampleRate, |
(...skipping 29 matching lines...) |
552 expected.push_back(Point(0.02f, 0, 0)); | 553 expected.push_back(Point(0.02f, 0, 0)); |
553 ExpectPointVectorEqual(expected, ParseArrayGeometry("-0.02 0 0 0.02 0 0")); | 554 ExpectPointVectorEqual(expected, ParseArrayGeometry("-0.02 0 0 0.02 0 0")); |
554 } | 555 } |
555 { | 556 { |
556 PointVector expected(1, Point(1, 2, 3)); | 557 PointVector expected(1, Point(1, 2, 3)); |
557 ExpectPointVectorEqual(expected, ParseArrayGeometry("1 2 3")); | 558 ExpectPointVectorEqual(expected, ParseArrayGeometry("1 2 3")); |
558 } | 559 } |
559 } | 560 } |
560 | 561 |
561 } // namespace content | 562 } // namespace content |