OLD | NEW |
1 // Copyright 2013 The Chromium Authors. All rights reserved. | 1 // Copyright 2013 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include <vector> | 5 #include <vector> |
6 | 6 |
7 #include "base/files/file_path.h" | 7 #include "base/files/file_path.h" |
8 #include "base/files/file_util.h" | 8 #include "base/files/file_util.h" |
9 #include "base/logging.h" | 9 #include "base/logging.h" |
10 #include "base/memory/aligned_memory.h" | 10 #include "base/memory/aligned_memory.h" |
11 #include "base/path_service.h" | 11 #include "base/path_service.h" |
12 #include "base/time/time.h" | 12 #include "base/time/time.h" |
13 #include "content/public/common/media_stream_request.h" | 13 #include "content/public/common/media_stream_request.h" |
14 #include "content/renderer/media/media_stream_audio_processor.h" | 14 #include "content/renderer/media/media_stream_audio_processor.h" |
15 #include "content/renderer/media/media_stream_audio_processor_options.h" | 15 #include "content/renderer/media/media_stream_audio_processor_options.h" |
16 #include "content/renderer/media/mock_media_constraint_factory.h" | 16 #include "content/renderer/media/mock_media_constraint_factory.h" |
17 #include "media/audio/audio_parameters.h" | 17 #include "media/audio/audio_parameters.h" |
18 #include "media/base/audio_bus.h" | 18 #include "media/base/audio_bus.h" |
19 #include "testing/gmock/include/gmock/gmock.h" | 19 #include "testing/gmock/include/gmock/gmock.h" |
20 #include "testing/gtest/include/gtest/gtest.h" | 20 #include "testing/gtest/include/gtest/gtest.h" |
21 #include "third_party/WebKit/public/platform/WebMediaConstraints.h" | 21 #include "third_party/WebKit/public/platform/WebMediaConstraints.h" |
22 #include "third_party/libjingle/source/talk/app/webrtc/mediastreaminterface.h" | 22 #include "third_party/libjingle/source/talk/app/webrtc/mediastreaminterface.h" |
23 | 23 |
24 using ::testing::_; | 24 using ::testing::_; |
25 using ::testing::AnyNumber; | 25 using ::testing::AnyNumber; |
26 using ::testing::AtLeast; | 26 using ::testing::AtLeast; |
27 using ::testing::Return; | 27 using ::testing::Return; |
28 | 28 |
| 29 using media::AudioParameters; |
| 30 |
| 31 namespace webrtc { |
| 32 |
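| // Equality operator so EXPECT_EQ can compare webrtc::Point values in the array geometry tests below. |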
| 33 bool operator==(const webrtc::Point& lhs, const webrtc::Point& rhs) { |
| 34 return lhs.x() == rhs.x() && lhs.y() == rhs.y() && lhs.z() == rhs.z(); |
| 35 } |
| 36 |
| 37 } // namespace webrtc |
| 38 |
29 namespace content { | 39 namespace content { |
30 | 40 |
31 namespace { | 41 namespace { |
32 | 42 |
33 #if defined(ANDROID) | 43 #if defined(ANDROID) |
34 const int kAudioProcessingSampleRate = 16000; | 44 const int kAudioProcessingSampleRate = 16000; |
35 #else | 45 #else |
36 const int kAudioProcessingSampleRate = 48000; | 46 const int kAudioProcessingSampleRate = 48000; |
37 #endif | 47 #endif |
38 const int kAudioProcessingNumberOfChannel = 1; | 48 const int kAudioProcessingNumberOfChannel = 1; |
(...skipping 87 matching lines...)
126 | 136 |
127 media::AudioBus* processed_data = nullptr; | 137 media::AudioBus* processed_data = nullptr; |
128 base::TimeDelta capture_delay; | 138 base::TimeDelta capture_delay; |
129 int new_volume = 0; | 139 int new_volume = 0; |
130 while (audio_processor->ProcessAndConsumeData( | 140 while (audio_processor->ProcessAndConsumeData( |
131 255, false, &processed_data, &capture_delay, &new_volume)) { | 141 255, false, &processed_data, &capture_delay, &new_volume)) { |
132 EXPECT_TRUE(processed_data); | 142 EXPECT_TRUE(processed_data); |
133 EXPECT_NEAR(input_capture_delay.InMillisecondsF(), | 143 EXPECT_NEAR(input_capture_delay.InMillisecondsF(), |
134 capture_delay.InMillisecondsF(), | 144 capture_delay.InMillisecondsF(), |
135 output_buffer_duration.InMillisecondsF()); | 145 output_buffer_duration.InMillisecondsF()); |
136 EXPECT_EQ(audio_processor->OutputFormat().sample_rate(), | 146 EXPECT_EQ(expected_output_sample_rate, |
137 expected_output_sample_rate); | 147 audio_processor->OutputFormat().sample_rate()); |
138 EXPECT_EQ(audio_processor->OutputFormat().channels(), | 148 EXPECT_EQ(expected_output_channels, |
139 expected_output_channels); | 149 audio_processor->OutputFormat().channels()); |
140 EXPECT_EQ(audio_processor->OutputFormat().frames_per_buffer(), | 150 EXPECT_EQ(expected_output_buffer_size, |
141 expected_output_buffer_size); | 151 audio_processor->OutputFormat().frames_per_buffer()); |
142 } | 152 } |
143 | 153 |
144 data_ptr += params.frames_per_buffer() * params.channels(); | 154 data_ptr += params.frames_per_buffer() * params.channels(); |
145 } | 155 } |
146 } | 156 } |
147 | 157 |
148 void VerifyDefaultComponents(MediaStreamAudioProcessor* audio_processor) { | 158 void VerifyDefaultComponents(MediaStreamAudioProcessor* audio_processor) { |
149 webrtc::AudioProcessing* audio_processing = | 159 webrtc::AudioProcessing* audio_processing = |
150 audio_processor->audio_processing_.get(); | 160 audio_processor->audio_processing_.get(); |
151 #if defined(OS_ANDROID) | 161 #if defined(OS_ANDROID) |
(...skipping 22 matching lines...)
174 #else | 184 #else |
175 EXPECT_TRUE(audio_processing->gain_control()->mode() == | 185 EXPECT_TRUE(audio_processing->gain_control()->mode() == |
176 webrtc::GainControl::kAdaptiveAnalog); | 186 webrtc::GainControl::kAdaptiveAnalog); |
177 EXPECT_TRUE(audio_processing->voice_detection()->is_enabled()); | 187 EXPECT_TRUE(audio_processing->voice_detection()->is_enabled()); |
178 EXPECT_TRUE(audio_processing->voice_detection()->likelihood() == | 188 EXPECT_TRUE(audio_processing->voice_detection()->likelihood() == |
179 webrtc::VoiceDetection::kVeryLowLikelihood); | 189 webrtc::VoiceDetection::kVeryLowLikelihood); |
180 #endif | 190 #endif |
181 } | 191 } |
182 | 192 |
183 media::AudioParameters params_; | 193 media::AudioParameters params_; |
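| // Input device parameters (e.g. mic array geometry) passed to the MediaStreamAudioProcessor instances under test. |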
| 194 MediaStreamDevice::AudioDeviceParameters input_device_params_; |
184 }; | 195 }; |
185 | 196 |
186 // Test crashing with ASAN on Android. crbug.com/468762 | 197 // Test crashing with ASAN on Android. crbug.com/468762 |
187 #if defined(OS_ANDROID) && defined(ADDRESS_SANITIZER) | 198 #if defined(OS_ANDROID) && defined(ADDRESS_SANITIZER) |
188 #define MAYBE_WithAudioProcessing DISABLED_WithAudioProcessing | 199 #define MAYBE_WithAudioProcessing DISABLED_WithAudioProcessing |
189 #else | 200 #else |
190 #define MAYBE_WithAudioProcessing WithAudioProcessing | 201 #define MAYBE_WithAudioProcessing WithAudioProcessing |
191 #endif | 202 #endif |
192 TEST_F(MediaStreamAudioProcessorTest, MAYBE_WithAudioProcessing) { | 203 TEST_F(MediaStreamAudioProcessorTest, MAYBE_WithAudioProcessing) { |
193 MockMediaConstraintFactory constraint_factory; | 204 MockMediaConstraintFactory constraint_factory; |
194 scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device( | 205 scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device( |
195 new WebRtcAudioDeviceImpl()); | 206 new WebRtcAudioDeviceImpl()); |
196 scoped_refptr<MediaStreamAudioProcessor> audio_processor( | 207 scoped_refptr<MediaStreamAudioProcessor> audio_processor( |
197 new rtc::RefCountedObject<MediaStreamAudioProcessor>( | 208 new rtc::RefCountedObject<MediaStreamAudioProcessor>( |
198 constraint_factory.CreateWebMediaConstraints(), 0, | 209 constraint_factory.CreateWebMediaConstraints(), input_device_params_, |
199 webrtc_audio_device.get())); | 210 webrtc_audio_device.get())); |
200 EXPECT_TRUE(audio_processor->has_audio_processing()); | 211 EXPECT_TRUE(audio_processor->has_audio_processing()); |
201 audio_processor->OnCaptureFormatChanged(params_); | 212 audio_processor->OnCaptureFormatChanged(params_); |
202 VerifyDefaultComponents(audio_processor.get()); | 213 VerifyDefaultComponents(audio_processor.get()); |
203 | 214 |
204 ProcessDataAndVerifyFormat(audio_processor.get(), | 215 ProcessDataAndVerifyFormat(audio_processor.get(), |
205 kAudioProcessingSampleRate, | 216 kAudioProcessingSampleRate, |
206 kAudioProcessingNumberOfChannel, | 217 kAudioProcessingNumberOfChannel, |
207 kAudioProcessingSampleRate / 100); | 218 kAudioProcessingSampleRate / 100); |
208 // Set |audio_processor| to NULL to make sure |webrtc_audio_device| outlives | 219 // Set |audio_processor| to NULL to make sure |webrtc_audio_device| outlives |
209 // |audio_processor|. | 220 // |audio_processor|. |
210 audio_processor = NULL; | 221 audio_processor = NULL; |
211 } | 222 } |
212 | 223 |
213 TEST_F(MediaStreamAudioProcessorTest, VerifyTabCaptureWithoutAudioProcessing) { | 224 TEST_F(MediaStreamAudioProcessorTest, VerifyTabCaptureWithoutAudioProcessing) { |
214 scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device( | 225 scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device( |
215 new WebRtcAudioDeviceImpl()); | 226 new WebRtcAudioDeviceImpl()); |
216 // Create MediaStreamAudioProcessor instance for kMediaStreamSourceTab source. | 227 // Create MediaStreamAudioProcessor instance for kMediaStreamSourceTab source. |
217 MockMediaConstraintFactory tab_constraint_factory; | 228 MockMediaConstraintFactory tab_constraint_factory; |
218 const std::string tab_string = kMediaStreamSourceTab; | 229 const std::string tab_string = kMediaStreamSourceTab; |
219 tab_constraint_factory.AddMandatory(kMediaStreamSource, | 230 tab_constraint_factory.AddMandatory(kMediaStreamSource, |
220 tab_string); | 231 tab_string); |
221 scoped_refptr<MediaStreamAudioProcessor> audio_processor( | 232 scoped_refptr<MediaStreamAudioProcessor> audio_processor( |
222 new rtc::RefCountedObject<MediaStreamAudioProcessor>( | 233 new rtc::RefCountedObject<MediaStreamAudioProcessor>( |
223 tab_constraint_factory.CreateWebMediaConstraints(), 0, | 234 tab_constraint_factory.CreateWebMediaConstraints(), |
224 webrtc_audio_device.get())); | 235 input_device_params_, webrtc_audio_device.get())); |
225 EXPECT_FALSE(audio_processor->has_audio_processing()); | 236 EXPECT_FALSE(audio_processor->has_audio_processing()); |
226 audio_processor->OnCaptureFormatChanged(params_); | 237 audio_processor->OnCaptureFormatChanged(params_); |
227 | 238 |
228 ProcessDataAndVerifyFormat(audio_processor.get(), | 239 ProcessDataAndVerifyFormat(audio_processor.get(), |
229 params_.sample_rate(), | 240 params_.sample_rate(), |
230 params_.channels(), | 241 params_.channels(), |
231 params_.sample_rate() / 100); | 242 params_.sample_rate() / 100); |
232 | 243 |
233 // Create MediaStreamAudioProcessor instance for kMediaStreamSourceSystem | 244 // Create MediaStreamAudioProcessor instance for kMediaStreamSourceSystem |
234 // source. | 245 // source. |
235 MockMediaConstraintFactory system_constraint_factory; | 246 MockMediaConstraintFactory system_constraint_factory; |
236 const std::string system_string = kMediaStreamSourceSystem; | 247 const std::string system_string = kMediaStreamSourceSystem; |
237 system_constraint_factory.AddMandatory(kMediaStreamSource, | 248 system_constraint_factory.AddMandatory(kMediaStreamSource, |
238 system_string); | 249 system_string); |
239 audio_processor = new rtc::RefCountedObject<MediaStreamAudioProcessor>( | 250 audio_processor = new rtc::RefCountedObject<MediaStreamAudioProcessor>( |
240 system_constraint_factory.CreateWebMediaConstraints(), 0, | 251 system_constraint_factory.CreateWebMediaConstraints(), |
241 webrtc_audio_device.get()); | 252 input_device_params_, webrtc_audio_device.get()); |
242 EXPECT_FALSE(audio_processor->has_audio_processing()); | 253 EXPECT_FALSE(audio_processor->has_audio_processing()); |
243 | 254 |
244 // Set |audio_processor| to NULL to make sure |webrtc_audio_device| outlives | 255 // Set |audio_processor| to NULL to make sure |webrtc_audio_device| outlives |
245 // |audio_processor|. | 256 // |audio_processor|. |
246 audio_processor = NULL; | 257 audio_processor = NULL; |
247 } | 258 } |
248 | 259 |
249 TEST_F(MediaStreamAudioProcessorTest, TurnOffDefaultConstraints) { | 260 TEST_F(MediaStreamAudioProcessorTest, TurnOffDefaultConstraints) { |
250 // Turn off the default constraints and pass them to MediaStreamAudioProcessor. | 261 // Turn off the default constraints and pass them to MediaStreamAudioProcessor. |
251 MockMediaConstraintFactory constraint_factory; | 262 MockMediaConstraintFactory constraint_factory; |
252 constraint_factory.DisableDefaultAudioConstraints(); | 263 constraint_factory.DisableDefaultAudioConstraints(); |
253 scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device( | 264 scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device( |
254 new WebRtcAudioDeviceImpl()); | 265 new WebRtcAudioDeviceImpl()); |
255 scoped_refptr<MediaStreamAudioProcessor> audio_processor( | 266 scoped_refptr<MediaStreamAudioProcessor> audio_processor( |
256 new rtc::RefCountedObject<MediaStreamAudioProcessor>( | 267 new rtc::RefCountedObject<MediaStreamAudioProcessor>( |
257 constraint_factory.CreateWebMediaConstraints(), 0, | 268 constraint_factory.CreateWebMediaConstraints(), input_device_params_, |
258 webrtc_audio_device.get())); | 269 webrtc_audio_device.get())); |
259 EXPECT_FALSE(audio_processor->has_audio_processing()); | 270 EXPECT_FALSE(audio_processor->has_audio_processing()); |
260 audio_processor->OnCaptureFormatChanged(params_); | 271 audio_processor->OnCaptureFormatChanged(params_); |
261 | 272 |
262 ProcessDataAndVerifyFormat(audio_processor.get(), | 273 ProcessDataAndVerifyFormat(audio_processor.get(), |
263 params_.sample_rate(), | 274 params_.sample_rate(), |
264 params_.channels(), | 275 params_.channels(), |
265 params_.sample_rate() / 100); | 276 params_.sample_rate() / 100); |
266 // Set |audio_processor| to NULL to make sure |webrtc_audio_device| outlives | 277 // Set |audio_processor| to NULL to make sure |webrtc_audio_device| outlives |
267 // |audio_processor|. | 278 // |audio_processor|. |
(...skipping 97 matching lines...)
365 | 376 |
366 TEST_F(MediaStreamAudioProcessorTest, ValidateConstraints) { | 377 TEST_F(MediaStreamAudioProcessorTest, ValidateConstraints) { |
367 MockMediaConstraintFactory constraint_factory; | 378 MockMediaConstraintFactory constraint_factory; |
368 const std::string dummy_constraint = "dummy"; | 379 const std::string dummy_constraint = "dummy"; |
369 constraint_factory.AddMandatory(dummy_constraint, true); | 380 constraint_factory.AddMandatory(dummy_constraint, true); |
370 MediaAudioConstraints audio_constraints( | 381 MediaAudioConstraints audio_constraints( |
371 constraint_factory.CreateWebMediaConstraints(), 0); | 382 constraint_factory.CreateWebMediaConstraints(), 0); |
372 EXPECT_FALSE(audio_constraints.IsValid()); | 383 EXPECT_FALSE(audio_constraints.IsValid()); |
373 } | 384 } |
374 | 385 |
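| // Builds MediaAudioConstraints from the given factory with no audio effects enabled. |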
| 386 MediaAudioConstraints MakeMediaAudioConstraints( |
| 387 const MockMediaConstraintFactory& constraint_factory) { |
| 388 return MediaAudioConstraints(constraint_factory.CreateWebMediaConstraints(), |
| 389 AudioParameters::NO_EFFECTS); |
| 390 } |
| 391 |
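| // Verifies that GetArrayGeometryPreferringConstraints() prefers geometry from the constraints over the input device's geometry. |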
| 392 TEST_F(MediaStreamAudioProcessorTest, SelectsConstraintsArrayGeometryIfExists) { |
| 393 std::vector<webrtc::Point> constraints_geometry(1, |
| 394 webrtc::Point(-0.02, 0, 0)); |
| 395 constraints_geometry.push_back(webrtc::Point(0.02, 0, 0)); |
| 396 |
| 397 std::vector<webrtc::Point> input_device_geometry(1, webrtc::Point(0, 0, 0)); |
| 398 input_device_geometry.push_back(webrtc::Point(0, 0.05f, 0)); |
| 399 |
| 400 { |
| 401 // Both geometries empty. |
| 402 MockMediaConstraintFactory constraint_factory; |
| 403 MediaStreamDevice::AudioDeviceParameters input_params; |
| 404 |
| 405 const auto& actual_geometry = GetArrayGeometryPreferringConstraints( |
| 406 MakeMediaAudioConstraints(constraint_factory), input_params); |
| 407 EXPECT_EQ(std::vector<webrtc::Point>(), actual_geometry); |
| 408 } |
| 409 { |
| 410 // Constraints geometry empty. |
| 411 MockMediaConstraintFactory constraint_factory; |
| 412 MediaStreamDevice::AudioDeviceParameters input_params; |
| 413 input_params.mic_positions.push_back(media::Point(0, 0, 0)); |
| 414 input_params.mic_positions.push_back(media::Point(0, 0.05f, 0)); |
| 415 |
| 416 const auto& actual_geometry = GetArrayGeometryPreferringConstraints( |
| 417 MakeMediaAudioConstraints(constraint_factory), input_params); |
| 418 EXPECT_EQ(input_device_geometry, actual_geometry); |
| 419 } |
| 420 { |
| 421 // Input device geometry empty. |
| 422 MockMediaConstraintFactory constraint_factory; |
| 423 constraint_factory.AddOptional(MediaAudioConstraints::kGoogArrayGeometry, |
| 424 std::string("-0.02 0 0 0.02 0 0")); |
| 425 MediaStreamDevice::AudioDeviceParameters input_params; |
| 426 |
| 427 const auto& actual_geometry = GetArrayGeometryPreferringConstraints( |
| 428 MakeMediaAudioConstraints(constraint_factory), input_params); |
| 429 EXPECT_EQ(constraints_geometry, actual_geometry); |
| 430 } |
| 431 { |
| 432 // Both geometries existing. |
| 433 MockMediaConstraintFactory constraint_factory; |
| 434 constraint_factory.AddOptional(MediaAudioConstraints::kGoogArrayGeometry, |
| 435 std::string("-0.02 0 0 0.02 0 0")); |
| 436 MediaStreamDevice::AudioDeviceParameters input_params; |
| 437 input_params.mic_positions.push_back(media::Point(0, 0, 0)); |
| 438 input_params.mic_positions.push_back(media::Point(0, 0.05f, 0)); |
| 439 |
| 440 // Constraints geometry is preferred. |
| 441 const auto& actual_geometry = GetArrayGeometryPreferringConstraints( |
| 442 MakeMediaAudioConstraints(constraint_factory), input_params); |
| 443 EXPECT_EQ(constraints_geometry, actual_geometry); |
| 444 } |
| 445 } |
| 446 |
375 // Test crashing with ASAN on Android. crbug.com/468762 | 447 // Test crashing with ASAN on Android. crbug.com/468762 |
376 #if defined(OS_ANDROID) && defined(ADDRESS_SANITIZER) | 448 #if defined(OS_ANDROID) && defined(ADDRESS_SANITIZER) |
377 #define MAYBE_TestAllSampleRates DISABLED_TestAllSampleRates | 449 #define MAYBE_TestAllSampleRates DISABLED_TestAllSampleRates |
378 #else | 450 #else |
379 #define MAYBE_TestAllSampleRates TestAllSampleRates | 451 #define MAYBE_TestAllSampleRates TestAllSampleRates |
380 #endif | 452 #endif |
381 TEST_F(MediaStreamAudioProcessorTest, MAYBE_TestAllSampleRates) { | 453 TEST_F(MediaStreamAudioProcessorTest, MAYBE_TestAllSampleRates) { |
382 MockMediaConstraintFactory constraint_factory; | 454 MockMediaConstraintFactory constraint_factory; |
383 scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device( | 455 scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device( |
384 new WebRtcAudioDeviceImpl()); | 456 new WebRtcAudioDeviceImpl()); |
385 scoped_refptr<MediaStreamAudioProcessor> audio_processor( | 457 scoped_refptr<MediaStreamAudioProcessor> audio_processor( |
386 new rtc::RefCountedObject<MediaStreamAudioProcessor>( | 458 new rtc::RefCountedObject<MediaStreamAudioProcessor>( |
387 constraint_factory.CreateWebMediaConstraints(), 0, | 459 constraint_factory.CreateWebMediaConstraints(), input_device_params_, |
388 webrtc_audio_device.get())); | 460 webrtc_audio_device.get())); |
389 EXPECT_TRUE(audio_processor->has_audio_processing()); | 461 EXPECT_TRUE(audio_processor->has_audio_processing()); |
390 | 462 |
391 static const int kSupportedSampleRates[] = | 463 static const int kSupportedSampleRates[] = |
392 { 8000, 16000, 22050, 32000, 44100, 48000, 88200, 96000 }; | 464 { 8000, 16000, 22050, 32000, 44100, 48000, 88200, 96000 }; |
393 for (size_t i = 0; i < arraysize(kSupportedSampleRates); ++i) { | 465 for (size_t i = 0; i < arraysize(kSupportedSampleRates); ++i) { |
394 int buffer_size = (kSupportedSampleRates[i] / 100) < 128 ? | 466 int buffer_size = (kSupportedSampleRates[i] / 100) < 128 ? |
395 kSupportedSampleRates[i] / 100 : 128; | 467 kSupportedSampleRates[i] / 100 : 128; |
396 media::AudioParameters params( | 468 media::AudioParameters params( |
397 media::AudioParameters::AUDIO_PCM_LOW_LATENCY, | 469 media::AudioParameters::AUDIO_PCM_LOW_LATENCY, |
(...skipping 20 matching lines...)
418 base::MessageLoopForUI message_loop; | 490 base::MessageLoopForUI message_loop; |
419 scoped_refptr<AecDumpMessageFilter> aec_dump_message_filter_( | 491 scoped_refptr<AecDumpMessageFilter> aec_dump_message_filter_( |
420 new AecDumpMessageFilter(message_loop.task_runner(), | 492 new AecDumpMessageFilter(message_loop.task_runner(), |
421 message_loop.task_runner())); | 493 message_loop.task_runner())); |
422 | 494 |
423 MockMediaConstraintFactory constraint_factory; | 495 MockMediaConstraintFactory constraint_factory; |
424 scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device( | 496 scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device( |
425 new WebRtcAudioDeviceImpl()); | 497 new WebRtcAudioDeviceImpl()); |
426 scoped_refptr<MediaStreamAudioProcessor> audio_processor( | 498 scoped_refptr<MediaStreamAudioProcessor> audio_processor( |
427 new rtc::RefCountedObject<MediaStreamAudioProcessor>( | 499 new rtc::RefCountedObject<MediaStreamAudioProcessor>( |
428 constraint_factory.CreateWebMediaConstraints(), 0, | 500 constraint_factory.CreateWebMediaConstraints(), input_device_params_, |
429 webrtc_audio_device.get())); | 501 webrtc_audio_device.get())); |
430 | 502 |
431 EXPECT_TRUE(audio_processor->aec_dump_message_filter_.get()); | 503 EXPECT_TRUE(audio_processor->aec_dump_message_filter_.get()); |
432 | 504 |
433 audio_processor = NULL; | 505 audio_processor = NULL; |
434 } | 506 } |
435 | 507 |
436 TEST_F(MediaStreamAudioProcessorTest, TestStereoAudio) { | 508 TEST_F(MediaStreamAudioProcessorTest, TestStereoAudio) { |
437 // Set up constraints that turn off audio processing and turn on stereo | 509 // Set up constraints that turn off audio processing and turn on stereo |
438 // channel mirroring. | 510 // channel mirroring. |
439 MockMediaConstraintFactory constraint_factory; | 511 MockMediaConstraintFactory constraint_factory; |
440 constraint_factory.AddMandatory(MediaAudioConstraints::kEchoCancellation, | 512 constraint_factory.AddMandatory(MediaAudioConstraints::kEchoCancellation, |
441 false); | 513 false); |
442 constraint_factory.AddMandatory(MediaAudioConstraints::kGoogAudioMirroring, | 514 constraint_factory.AddMandatory(MediaAudioConstraints::kGoogAudioMirroring, |
443 true); | 515 true); |
444 scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device( | 516 scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device( |
445 new WebRtcAudioDeviceImpl()); | 517 new WebRtcAudioDeviceImpl()); |
446 scoped_refptr<MediaStreamAudioProcessor> audio_processor( | 518 scoped_refptr<MediaStreamAudioProcessor> audio_processor( |
447 new rtc::RefCountedObject<MediaStreamAudioProcessor>( | 519 new rtc::RefCountedObject<MediaStreamAudioProcessor>( |
448 constraint_factory.CreateWebMediaConstraints(), 0, | 520 constraint_factory.CreateWebMediaConstraints(), input_device_params_, |
449 webrtc_audio_device.get())); | 521 webrtc_audio_device.get())); |
450 EXPECT_FALSE(audio_processor->has_audio_processing()); | 522 EXPECT_FALSE(audio_processor->has_audio_processing()); |
451 const media::AudioParameters source_params( | 523 const media::AudioParameters source_params( |
452 media::AudioParameters::AUDIO_PCM_LOW_LATENCY, | 524 media::AudioParameters::AUDIO_PCM_LOW_LATENCY, |
453 media::CHANNEL_LAYOUT_STEREO, 48000, 16, 480); | 525 media::CHANNEL_LAYOUT_STEREO, 48000, 16, 480); |
454 audio_processor->OnCaptureFormatChanged(source_params); | 526 audio_processor->OnCaptureFormatChanged(source_params); |
455 EXPECT_EQ(audio_processor->OutputFormat().channels(), 2); | 527 EXPECT_EQ(audio_processor->OutputFormat().channels(), 2); |
456 | 528 |
457 // Construct left and right channels, and assign different values to the | 529 // Construct left and right channels, and assign different values to the |
458 // first sample of each channel. | 530 // first sample of each channel. |
(...skipping 43 matching lines...)
502 #endif | 574 #endif |
503 | 575 |
504 TEST_F(MediaStreamAudioProcessorTest, MAYBE_TestWithKeyboardMicChannel) { | 576 TEST_F(MediaStreamAudioProcessorTest, MAYBE_TestWithKeyboardMicChannel) { |
505 MockMediaConstraintFactory constraint_factory; | 577 MockMediaConstraintFactory constraint_factory; |
506 constraint_factory.AddMandatory( | 578 constraint_factory.AddMandatory( |
507 MediaAudioConstraints::kGoogExperimentalNoiseSuppression, true); | 579 MediaAudioConstraints::kGoogExperimentalNoiseSuppression, true); |
508 scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device( | 580 scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device( |
509 new WebRtcAudioDeviceImpl()); | 581 new WebRtcAudioDeviceImpl()); |
510 scoped_refptr<MediaStreamAudioProcessor> audio_processor( | 582 scoped_refptr<MediaStreamAudioProcessor> audio_processor( |
511 new rtc::RefCountedObject<MediaStreamAudioProcessor>( | 583 new rtc::RefCountedObject<MediaStreamAudioProcessor>( |
512 constraint_factory.CreateWebMediaConstraints(), 0, | 584 constraint_factory.CreateWebMediaConstraints(), input_device_params_, |
513 webrtc_audio_device.get())); | 585 webrtc_audio_device.get())); |
514 EXPECT_TRUE(audio_processor->has_audio_processing()); | 586 EXPECT_TRUE(audio_processor->has_audio_processing()); |
515 | 587 |
516 media::AudioParameters params(media::AudioParameters::AUDIO_PCM_LOW_LATENCY, | 588 media::AudioParameters params(media::AudioParameters::AUDIO_PCM_LOW_LATENCY, |
517 media::CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC, | 589 media::CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC, |
518 48000, 16, 512); | 590 48000, 16, 512); |
519 audio_processor->OnCaptureFormatChanged(params); | 591 audio_processor->OnCaptureFormatChanged(params); |
520 | 592 |
521 ProcessDataAndVerifyFormat(audio_processor.get(), | 593 ProcessDataAndVerifyFormat(audio_processor.get(), |
522 kAudioProcessingSampleRate, | 594 kAudioProcessingSampleRate, |
523 kAudioProcessingNumberOfChannel, | 595 kAudioProcessingNumberOfChannel, |
524 kAudioProcessingSampleRate / 100); | 596 kAudioProcessingSampleRate / 100); |
525 // Set |audio_processor| to NULL to make sure |webrtc_audio_device| outlives | 597 // Set |audio_processor| to NULL to make sure |webrtc_audio_device| outlives |
526 // |audio_processor|. | 598 // |audio_processor|. |
527 audio_processor = NULL; | 599 audio_processor = NULL; |
528 } | 600 } |
529 | 601 |
530 using Point = webrtc::Point; | |
531 using PointVector = std::vector<Point>; | |
532 | |
533 void ExpectPointVectorEqual(const PointVector& expected, | |
534 const PointVector& actual) { | |
535 EXPECT_EQ(expected.size(), actual.size()); | |
536 for (size_t i = 0; i < actual.size(); ++i) { | |
537 EXPECT_EQ(expected[i].x(), actual[i].x()); | |
538 EXPECT_EQ(expected[i].y(), actual[i].y()); | |
539 EXPECT_EQ(expected[i].z(), actual[i].z()); | |
540 } | |
541 } | |
542 | |
543 TEST(MediaStreamAudioProcessorOptionsTest, ParseArrayGeometry) { | |
544 const PointVector expected_empty; | |
545 ExpectPointVectorEqual(expected_empty, ParseArrayGeometry("")); | |
546 ExpectPointVectorEqual(expected_empty, ParseArrayGeometry("0 0 a")); | |
547 ExpectPointVectorEqual(expected_empty, ParseArrayGeometry("1 2")); | |
548 ExpectPointVectorEqual(expected_empty, ParseArrayGeometry("1 2 3 4")); | |
549 | |
550 { | |
551 PointVector expected(1, Point(-0.02f, 0, 0)); | |
552 expected.push_back(Point(0.02f, 0, 0)); | |
553 ExpectPointVectorEqual(expected, ParseArrayGeometry("-0.02 0 0 0.02 0 0")); | |
554 } | |
555 { | |
556 PointVector expected(1, Point(1, 2, 3)); | |
557 ExpectPointVectorEqual(expected, ParseArrayGeometry("1 2 3")); | |
558 } | |
559 } | |
560 | |
561 } // namespace content | 602 } // namespace content |