 Chromium Code Reviews
 Chromium Code Reviews Issue 1275783003:
  Add a virtual beamforming audio device on ChromeOS.  (Closed) 
  Base URL: https://chromium.googlesource.com/chromium/src.git@master
    
  
    Issue 1275783003:
  Add a virtual beamforming audio device on ChromeOS.  (Closed) 
  Base URL: https://chromium.googlesource.com/chromium/src.git@master| OLD | NEW | 
|---|---|
| 1 // Copyright 2013 The Chromium Authors. All rights reserved. | 1 // Copyright 2013 The Chromium Authors. All rights reserved. | 
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be | 
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. | 
| 4 | 4 | 
| 5 #include <vector> | 5 #include <vector> | 
| 6 | 6 | 
| 7 #include "base/files/file_path.h" | 7 #include "base/files/file_path.h" | 
| 8 #include "base/files/file_util.h" | 8 #include "base/files/file_util.h" | 
| 9 #include "base/logging.h" | 9 #include "base/logging.h" | 
| 10 #include "base/memory/aligned_memory.h" | 10 #include "base/memory/aligned_memory.h" | 
| 11 #include "base/path_service.h" | 11 #include "base/path_service.h" | 
| 12 #include "base/time/time.h" | 12 #include "base/time/time.h" | 
| 13 #include "content/public/common/media_stream_request.h" | 13 #include "content/public/common/media_stream_request.h" | 
| 14 #include "content/renderer/media/media_stream_audio_processor.h" | 14 #include "content/renderer/media/media_stream_audio_processor.h" | 
| 15 #include "content/renderer/media/media_stream_audio_processor_options.h" | 15 #include "content/renderer/media/media_stream_audio_processor_options.h" | 
| 16 #include "content/renderer/media/mock_media_constraint_factory.h" | 16 #include "content/renderer/media/mock_media_constraint_factory.h" | 
| 17 #include "media/audio/audio_parameters.h" | 17 #include "media/audio/audio_parameters.h" | 
| 18 #include "media/base/audio_bus.h" | 18 #include "media/base/audio_bus.h" | 
| 19 #include "testing/gmock/include/gmock/gmock.h" | 19 #include "testing/gmock/include/gmock/gmock.h" | 
| 20 #include "testing/gtest/include/gtest/gtest.h" | 20 #include "testing/gtest/include/gtest/gtest.h" | 
| 21 #include "third_party/WebKit/public/platform/WebMediaConstraints.h" | 21 #include "third_party/WebKit/public/platform/WebMediaConstraints.h" | 
| 22 #include "third_party/libjingle/source/talk/app/webrtc/mediastreaminterface.h" | 22 #include "third_party/libjingle/source/talk/app/webrtc/mediastreaminterface.h" | 
| 23 | 23 | 
| 24 using ::testing::_; | 24 using ::testing::_; | 
| 25 using ::testing::AnyNumber; | 25 using ::testing::AnyNumber; | 
| 26 using ::testing::AtLeast; | 26 using ::testing::AtLeast; | 
| 27 using ::testing::Return; | 27 using ::testing::Return; | 
| 28 | 28 | 
| 29 using media::AudioParameters; | |
| 30 | |
| 31 namespace webrtc { | |
| 32 | |
| 33 bool operator==(const webrtc::Point& lhs, const webrtc::Point& rhs) { | |
| 34 return lhs.x() == rhs.x() && lhs.y() == rhs.y() && lhs.z() == rhs.z(); | |
| 35 } | |
| 36 | |
| 37 } // namespace webrtc | |
| 38 | |
| 29 namespace content { | 39 namespace content { | 
| 30 | 40 | 
| 31 namespace { | 41 namespace { | 
| 32 | 42 | 
| 33 #if defined(ANDROID) | 43 #if defined(ANDROID) | 
| 34 const int kAudioProcessingSampleRate = 16000; | 44 const int kAudioProcessingSampleRate = 16000; | 
| 35 #else | 45 #else | 
| 36 const int kAudioProcessingSampleRate = 48000; | 46 const int kAudioProcessingSampleRate = 48000; | 
| 37 #endif | 47 #endif | 
| 38 const int kAudioProcessingNumberOfChannel = 1; | 48 const int kAudioProcessingNumberOfChannel = 1; | 
| (...skipping 87 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 126 | 136 | 
| 127 media::AudioBus* processed_data = nullptr; | 137 media::AudioBus* processed_data = nullptr; | 
| 128 base::TimeDelta capture_delay; | 138 base::TimeDelta capture_delay; | 
| 129 int new_volume = 0; | 139 int new_volume = 0; | 
| 130 while (audio_processor->ProcessAndConsumeData( | 140 while (audio_processor->ProcessAndConsumeData( | 
| 131 255, false, &processed_data, &capture_delay, &new_volume)) { | 141 255, false, &processed_data, &capture_delay, &new_volume)) { | 
| 132 EXPECT_TRUE(processed_data); | 142 EXPECT_TRUE(processed_data); | 
| 133 EXPECT_NEAR(input_capture_delay.InMillisecondsF(), | 143 EXPECT_NEAR(input_capture_delay.InMillisecondsF(), | 
| 134 capture_delay.InMillisecondsF(), | 144 capture_delay.InMillisecondsF(), | 
| 135 output_buffer_duration.InMillisecondsF()); | 145 output_buffer_duration.InMillisecondsF()); | 
| 136 EXPECT_EQ(audio_processor->OutputFormat().sample_rate(), | 146 EXPECT_EQ(expected_output_sample_rate, | 
| 137 expected_output_sample_rate); | 147 audio_processor->OutputFormat().sample_rate()); | 
| 138 EXPECT_EQ(audio_processor->OutputFormat().channels(), | 148 EXPECT_EQ(expected_output_channels, | 
| 139 expected_output_channels); | 149 audio_processor->OutputFormat().channels()); | 
| 140 EXPECT_EQ(audio_processor->OutputFormat().frames_per_buffer(), | 150 EXPECT_EQ(expected_output_buffer_size, | 
| 141 expected_output_buffer_size); | 151 audio_processor->OutputFormat().frames_per_buffer()); | 
| 142 } | 152 } | 
| 143 | 153 | 
| 144 data_ptr += params.frames_per_buffer() * params.channels(); | 154 data_ptr += params.frames_per_buffer() * params.channels(); | 
| 145 } | 155 } | 
| 146 } | 156 } | 
| 147 | 157 | 
| 148 void VerifyDefaultComponents(MediaStreamAudioProcessor* audio_processor) { | 158 void VerifyDefaultComponents(MediaStreamAudioProcessor* audio_processor) { | 
| 149 webrtc::AudioProcessing* audio_processing = | 159 webrtc::AudioProcessing* audio_processing = | 
| 150 audio_processor->audio_processing_.get(); | 160 audio_processor->audio_processing_.get(); | 
| 151 #if defined(OS_ANDROID) | 161 #if defined(OS_ANDROID) | 
| (...skipping 22 matching lines...) Expand all Loading... | |
| 174 #else | 184 #else | 
| 175 EXPECT_TRUE(audio_processing->gain_control()->mode() == | 185 EXPECT_TRUE(audio_processing->gain_control()->mode() == | 
| 176 webrtc::GainControl::kAdaptiveAnalog); | 186 webrtc::GainControl::kAdaptiveAnalog); | 
| 177 EXPECT_TRUE(audio_processing->voice_detection()->is_enabled()); | 187 EXPECT_TRUE(audio_processing->voice_detection()->is_enabled()); | 
| 178 EXPECT_TRUE(audio_processing->voice_detection()->likelihood() == | 188 EXPECT_TRUE(audio_processing->voice_detection()->likelihood() == | 
| 179 webrtc::VoiceDetection::kVeryLowLikelihood); | 189 webrtc::VoiceDetection::kVeryLowLikelihood); | 
| 180 #endif | 190 #endif | 
| 181 } | 191 } | 
| 182 | 192 | 
| 183 media::AudioParameters params_; | 193 media::AudioParameters params_; | 
| 194 MediaStreamDevice::AudioDeviceParameters input_device_params_; | |
| 184 }; | 195 }; | 
| 185 | 196 | 
| 186 // Test crashing with ASAN on Android. crbug.com/468762 | 197 // Test crashing with ASAN on Android. crbug.com/468762 | 
| 187 #if defined(OS_ANDROID) && defined(ADDRESS_SANITIZER) | 198 #if defined(OS_ANDROID) && defined(ADDRESS_SANITIZER) | 
| 188 #define MAYBE_WithAudioProcessing DISABLED_WithAudioProcessing | 199 #define MAYBE_WithAudioProcessing DISABLED_WithAudioProcessing | 
| 189 #else | 200 #else | 
| 190 #define MAYBE_WithAudioProcessing WithAudioProcessing | 201 #define MAYBE_WithAudioProcessing WithAudioProcessing | 
| 191 #endif | 202 #endif | 
| 192 TEST_F(MediaStreamAudioProcessorTest, MAYBE_WithAudioProcessing) { | 203 TEST_F(MediaStreamAudioProcessorTest, MAYBE_WithAudioProcessing) { | 
| 193 MockMediaConstraintFactory constraint_factory; | 204 MockMediaConstraintFactory constraint_factory; | 
| 194 scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device( | 205 scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device( | 
| 195 new WebRtcAudioDeviceImpl()); | 206 new WebRtcAudioDeviceImpl()); | 
| 196 scoped_refptr<MediaStreamAudioProcessor> audio_processor( | 207 scoped_refptr<MediaStreamAudioProcessor> audio_processor( | 
| 197 new rtc::RefCountedObject<MediaStreamAudioProcessor>( | 208 new rtc::RefCountedObject<MediaStreamAudioProcessor>( | 
| 198 constraint_factory.CreateWebMediaConstraints(), 0, | 209 constraint_factory.CreateWebMediaConstraints(), input_device_params_, | 
| 199 webrtc_audio_device.get())); | 210 webrtc_audio_device.get())); | 
| 200 EXPECT_TRUE(audio_processor->has_audio_processing()); | 211 EXPECT_TRUE(audio_processor->has_audio_processing()); | 
| 201 audio_processor->OnCaptureFormatChanged(params_); | 212 audio_processor->OnCaptureFormatChanged(params_); | 
| 202 VerifyDefaultComponents(audio_processor.get()); | 213 VerifyDefaultComponents(audio_processor.get()); | 
| 203 | 214 | 
| 204 ProcessDataAndVerifyFormat(audio_processor.get(), | 215 ProcessDataAndVerifyFormat(audio_processor.get(), | 
| 205 kAudioProcessingSampleRate, | 216 kAudioProcessingSampleRate, | 
| 206 kAudioProcessingNumberOfChannel, | 217 kAudioProcessingNumberOfChannel, | 
| 207 kAudioProcessingSampleRate / 100); | 218 kAudioProcessingSampleRate / 100); | 
| 208 // Set |audio_processor| to NULL to make sure |webrtc_audio_device| outlives | 219 // Set |audio_processor| to NULL to make sure |webrtc_audio_device| outlives | 
| 209 // |audio_processor|. | 220 // |audio_processor|. | 
| 210 audio_processor = NULL; | 221 audio_processor = NULL; | 
| 211 } | 222 } | 
| 212 | 223 | 
| 213 TEST_F(MediaStreamAudioProcessorTest, VerifyTabCaptureWithoutAudioProcessing) { | 224 TEST_F(MediaStreamAudioProcessorTest, VerifyTabCaptureWithoutAudioProcessing) { | 
| 214 scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device( | 225 scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device( | 
| 215 new WebRtcAudioDeviceImpl()); | 226 new WebRtcAudioDeviceImpl()); | 
| 216 // Create MediaStreamAudioProcessor instance for kMediaStreamSourceTab source. | 227 // Create MediaStreamAudioProcessor instance for kMediaStreamSourceTab source. | 
| 217 MockMediaConstraintFactory tab_constraint_factory; | 228 MockMediaConstraintFactory tab_constraint_factory; | 
| 218 const std::string tab_string = kMediaStreamSourceTab; | 229 const std::string tab_string = kMediaStreamSourceTab; | 
| 219 tab_constraint_factory.AddMandatory(kMediaStreamSource, | 230 tab_constraint_factory.AddMandatory(kMediaStreamSource, | 
| 220 tab_string); | 231 tab_string); | 
| 221 scoped_refptr<MediaStreamAudioProcessor> audio_processor( | 232 scoped_refptr<MediaStreamAudioProcessor> audio_processor( | 
| 222 new rtc::RefCountedObject<MediaStreamAudioProcessor>( | 233 new rtc::RefCountedObject<MediaStreamAudioProcessor>( | 
| 223 tab_constraint_factory.CreateWebMediaConstraints(), 0, | 234 tab_constraint_factory.CreateWebMediaConstraints(), | 
| 224 webrtc_audio_device.get())); | 235 input_device_params_, webrtc_audio_device.get())); | 
| 225 EXPECT_FALSE(audio_processor->has_audio_processing()); | 236 EXPECT_FALSE(audio_processor->has_audio_processing()); | 
| 226 audio_processor->OnCaptureFormatChanged(params_); | 237 audio_processor->OnCaptureFormatChanged(params_); | 
| 227 | 238 | 
| 228 ProcessDataAndVerifyFormat(audio_processor.get(), | 239 ProcessDataAndVerifyFormat(audio_processor.get(), | 
| 229 params_.sample_rate(), | 240 params_.sample_rate(), | 
| 230 params_.channels(), | 241 params_.channels(), | 
| 231 params_.sample_rate() / 100); | 242 params_.sample_rate() / 100); | 
| 232 | 243 | 
| 233 // Create MediaStreamAudioProcessor instance for kMediaStreamSourceSystem | 244 // Create MediaStreamAudioProcessor instance for kMediaStreamSourceSystem | 
| 234 // source. | 245 // source. | 
| 235 MockMediaConstraintFactory system_constraint_factory; | 246 MockMediaConstraintFactory system_constraint_factory; | 
| 236 const std::string system_string = kMediaStreamSourceSystem; | 247 const std::string system_string = kMediaStreamSourceSystem; | 
| 237 system_constraint_factory.AddMandatory(kMediaStreamSource, | 248 system_constraint_factory.AddMandatory(kMediaStreamSource, | 
| 238 system_string); | 249 system_string); | 
| 239 audio_processor = new rtc::RefCountedObject<MediaStreamAudioProcessor>( | 250 audio_processor = new rtc::RefCountedObject<MediaStreamAudioProcessor>( | 
| 240 system_constraint_factory.CreateWebMediaConstraints(), 0, | 251 system_constraint_factory.CreateWebMediaConstraints(), | 
| 241 webrtc_audio_device.get()); | 252 input_device_params_, webrtc_audio_device.get()); | 
| 242 EXPECT_FALSE(audio_processor->has_audio_processing()); | 253 EXPECT_FALSE(audio_processor->has_audio_processing()); | 
| 243 | 254 | 
| 244 // Set |audio_processor| to NULL to make sure |webrtc_audio_device| outlives | 255 // Set |audio_processor| to NULL to make sure |webrtc_audio_device| outlives | 
| 245 // |audio_processor|. | 256 // |audio_processor|. | 
| 246 audio_processor = NULL; | 257 audio_processor = NULL; | 
| 247 } | 258 } | 
| 248 | 259 | 
| 249 TEST_F(MediaStreamAudioProcessorTest, TurnOffDefaultConstraints) { | 260 TEST_F(MediaStreamAudioProcessorTest, TurnOffDefaultConstraints) { | 
| 250 // Turn off the default constraints and pass it to MediaStreamAudioProcessor. | 261 // Turn off the default constraints and pass it to MediaStreamAudioProcessor. | 
| 251 MockMediaConstraintFactory constraint_factory; | 262 MockMediaConstraintFactory constraint_factory; | 
| 252 constraint_factory.DisableDefaultAudioConstraints(); | 263 constraint_factory.DisableDefaultAudioConstraints(); | 
| 253 scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device( | 264 scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device( | 
| 254 new WebRtcAudioDeviceImpl()); | 265 new WebRtcAudioDeviceImpl()); | 
| 255 scoped_refptr<MediaStreamAudioProcessor> audio_processor( | 266 scoped_refptr<MediaStreamAudioProcessor> audio_processor( | 
| 256 new rtc::RefCountedObject<MediaStreamAudioProcessor>( | 267 new rtc::RefCountedObject<MediaStreamAudioProcessor>( | 
| 257 constraint_factory.CreateWebMediaConstraints(), 0, | 268 constraint_factory.CreateWebMediaConstraints(), input_device_params_, | 
| 258 webrtc_audio_device.get())); | 269 webrtc_audio_device.get())); | 
| 259 EXPECT_FALSE(audio_processor->has_audio_processing()); | 270 EXPECT_FALSE(audio_processor->has_audio_processing()); | 
| 260 audio_processor->OnCaptureFormatChanged(params_); | 271 audio_processor->OnCaptureFormatChanged(params_); | 
| 261 | 272 | 
| 262 ProcessDataAndVerifyFormat(audio_processor.get(), | 273 ProcessDataAndVerifyFormat(audio_processor.get(), | 
| 263 params_.sample_rate(), | 274 params_.sample_rate(), | 
| 264 params_.channels(), | 275 params_.channels(), | 
| 265 params_.sample_rate() / 100); | 276 params_.sample_rate() / 100); | 
| 266 // Set |audio_processor| to NULL to make sure |webrtc_audio_device| outlives | 277 // Set |audio_processor| to NULL to make sure |webrtc_audio_device| outlives | 
| 267 // |audio_processor|. | 278 // |audio_processor|. | 
| (...skipping 92 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 360 | 371 | 
| 361 TEST_F(MediaStreamAudioProcessorTest, ValidateConstraints) { | 372 TEST_F(MediaStreamAudioProcessorTest, ValidateConstraints) { | 
| 362 MockMediaConstraintFactory constraint_factory; | 373 MockMediaConstraintFactory constraint_factory; | 
| 363 const std::string dummy_constraint = "dummy"; | 374 const std::string dummy_constraint = "dummy"; | 
| 364 constraint_factory.AddMandatory(dummy_constraint, true); | 375 constraint_factory.AddMandatory(dummy_constraint, true); | 
| 365 MediaAudioConstraints audio_constraints( | 376 MediaAudioConstraints audio_constraints( | 
| 366 constraint_factory.CreateWebMediaConstraints(), 0); | 377 constraint_factory.CreateWebMediaConstraints(), 0); | 
| 367 EXPECT_FALSE(audio_constraints.IsValid()); | 378 EXPECT_FALSE(audio_constraints.IsValid()); | 
| 368 } | 379 } | 
| 369 | 380 | 
| 381 MediaAudioConstraints MakeMediaAudioConstraints( | |
| 382 const MockMediaConstraintFactory& constraint_factory) { | |
| 383 return MediaAudioConstraints(constraint_factory.CreateWebMediaConstraints(), | |
| 384 AudioParameters::NO_EFFECTS); | |
| 385 } | |
| 386 | |
| 387 TEST_F(MediaStreamAudioProcessorTest, SelectsConstraintsArrayGeometryIfExists) { | |
| 388 std::vector<webrtc::Point> constraints_geometry(1, | |
| 389 webrtc::Point(-0.02f, 0, 0)); | |
| 390 constraints_geometry.push_back(webrtc::Point(0.02f, 0, 0)); | |
| 391 | |
| 392 std::vector<webrtc::Point> input_device_geometry(1, webrtc::Point(0, 0, 0)); | |
| 393 input_device_geometry.push_back(webrtc::Point(0, 0.05f, 0)); | |
| 394 | |
| 395 { | |
| 396 // Both geometries empty. | |
| 397 MockMediaConstraintFactory constraint_factory; | |
| 398 MediaStreamDevice::AudioDeviceParameters input_params; | |
| 399 | |
| 400 const auto& actual_geometry = GetArrayGeometryPreferringConstraints( | |
| 401 MakeMediaAudioConstraints(constraint_factory), input_params); | |
| 402 EXPECT_EQ(std::vector<webrtc::Point>(), actual_geometry); | |
| 403 } | |
| 404 { | |
| 405 // Constraints geometry empty. | |
| 406 MockMediaConstraintFactory constraint_factory; | |
| 407 MediaStreamDevice::AudioDeviceParameters input_params; | |
| 408 input_params.mic_positions.push_back(media::Point(0, 0, 0)); | |
| 409 input_params.mic_positions.push_back(media::Point(0, 0.05f, 0)); | |
| 410 | |
| 411 const auto& actual_geometry = GetArrayGeometryPreferringConstraints( | |
| 412 MakeMediaAudioConstraints(constraint_factory), input_params); | |
| 413 EXPECT_EQ(input_device_geometry, actual_geometry); | |
| 
aluebs-chromium
2015/09/10 02:41:39
Can't you use input_params.mic_positions here instead? [comment truncated in capture]
 
ajm
2015/09/10 21:56:57
No, because input_params.mic_positions is a vector of media::Point, not webrtc::Point. [comment truncated in capture]
 
aluebs-chromium
2015/09/11 01:58:42
Good point! But maybe you can use one to get the other? [comment truncated in capture]
 
ajm
2015/09/11 05:06:32
I could, but that's part of the functionality I'm testing here. [comment truncated in capture]
 
aluebs-chromium
2015/09/11 06:09:34
Yes, you are right. You can leave it as is.
 | |
| 414 } | |
| 415 { | |
| 416 // Input device geometry empty. | |
| 417 MockMediaConstraintFactory constraint_factory; | |
| 418 constraint_factory.AddOptional(MediaAudioConstraints::kGoogArrayGeometry, | |
| 419 std::string("-0.02 0 0 0.02 0 0")); | |
| 420 MediaStreamDevice::AudioDeviceParameters input_params; | |
| 421 | |
| 422 const auto& actual_geometry = GetArrayGeometryPreferringConstraints( | |
| 423 MakeMediaAudioConstraints(constraint_factory), input_params); | |
| 424 EXPECT_EQ(constraints_geometry, actual_geometry); | |
| 425 } | |
| 426 { | |
| 427 // Both geometries existing. | |
| 428 MockMediaConstraintFactory constraint_factory; | |
| 429 constraint_factory.AddOptional(MediaAudioConstraints::kGoogArrayGeometry, | |
| 430 std::string("-0.02 0 0 0.02 0 0")); | |
| 431 MediaStreamDevice::AudioDeviceParameters input_params; | |
| 432 input_params.mic_positions.push_back(media::Point(0, 0, 0)); | |
| 433 input_params.mic_positions.push_back(media::Point(0, 0.05f, 0)); | |
| 434 | |
| 435 // Constraints geometry is preferred. | |
| 436 const auto& actual_geometry = GetArrayGeometryPreferringConstraints( | |
| 437 MakeMediaAudioConstraints(constraint_factory), input_params); | |
| 438 EXPECT_EQ(constraints_geometry, actual_geometry); | |
| 439 } | |
| 440 } | |
| 441 | |
| 370 // Test crashing with ASAN on Android. crbug.com/468762 | 442 // Test crashing with ASAN on Android. crbug.com/468762 | 
| 371 #if defined(OS_ANDROID) && defined(ADDRESS_SANITIZER) | 443 #if defined(OS_ANDROID) && defined(ADDRESS_SANITIZER) | 
| 372 #define MAYBE_TestAllSampleRates DISABLED_TestAllSampleRates | 444 #define MAYBE_TestAllSampleRates DISABLED_TestAllSampleRates | 
| 373 #else | 445 #else | 
| 374 #define MAYBE_TestAllSampleRates TestAllSampleRates | 446 #define MAYBE_TestAllSampleRates TestAllSampleRates | 
| 375 #endif | 447 #endif | 
| 376 TEST_F(MediaStreamAudioProcessorTest, MAYBE_TestAllSampleRates) { | 448 TEST_F(MediaStreamAudioProcessorTest, MAYBE_TestAllSampleRates) { | 
| 377 MockMediaConstraintFactory constraint_factory; | 449 MockMediaConstraintFactory constraint_factory; | 
| 378 scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device( | 450 scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device( | 
| 379 new WebRtcAudioDeviceImpl()); | 451 new WebRtcAudioDeviceImpl()); | 
| 380 scoped_refptr<MediaStreamAudioProcessor> audio_processor( | 452 scoped_refptr<MediaStreamAudioProcessor> audio_processor( | 
| 381 new rtc::RefCountedObject<MediaStreamAudioProcessor>( | 453 new rtc::RefCountedObject<MediaStreamAudioProcessor>( | 
| 382 constraint_factory.CreateWebMediaConstraints(), 0, | 454 constraint_factory.CreateWebMediaConstraints(), input_device_params_, | 
| 383 webrtc_audio_device.get())); | 455 webrtc_audio_device.get())); | 
| 384 EXPECT_TRUE(audio_processor->has_audio_processing()); | 456 EXPECT_TRUE(audio_processor->has_audio_processing()); | 
| 385 | 457 | 
| 386 static const int kSupportedSampleRates[] = | 458 static const int kSupportedSampleRates[] = | 
| 387 { 8000, 16000, 22050, 32000, 44100, 48000, 88200, 96000 }; | 459 { 8000, 16000, 22050, 32000, 44100, 48000, 88200, 96000 }; | 
| 388 for (size_t i = 0; i < arraysize(kSupportedSampleRates); ++i) { | 460 for (size_t i = 0; i < arraysize(kSupportedSampleRates); ++i) { | 
| 389 int buffer_size = (kSupportedSampleRates[i] / 100) < 128 ? | 461 int buffer_size = (kSupportedSampleRates[i] / 100) < 128 ? | 
| 390 kSupportedSampleRates[i] / 100 : 128; | 462 kSupportedSampleRates[i] / 100 : 128; | 
| 391 media::AudioParameters params( | 463 media::AudioParameters params( | 
| 392 media::AudioParameters::AUDIO_PCM_LOW_LATENCY, | 464 media::AudioParameters::AUDIO_PCM_LOW_LATENCY, | 
| (...skipping 20 matching lines...) Expand all Loading... | |
| 413 base::MessageLoopForUI message_loop; | 485 base::MessageLoopForUI message_loop; | 
| 414 scoped_refptr<AecDumpMessageFilter> aec_dump_message_filter_( | 486 scoped_refptr<AecDumpMessageFilter> aec_dump_message_filter_( | 
| 415 new AecDumpMessageFilter(message_loop.task_runner(), | 487 new AecDumpMessageFilter(message_loop.task_runner(), | 
| 416 message_loop.task_runner())); | 488 message_loop.task_runner())); | 
| 417 | 489 | 
| 418 MockMediaConstraintFactory constraint_factory; | 490 MockMediaConstraintFactory constraint_factory; | 
| 419 scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device( | 491 scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device( | 
| 420 new WebRtcAudioDeviceImpl()); | 492 new WebRtcAudioDeviceImpl()); | 
| 421 scoped_refptr<MediaStreamAudioProcessor> audio_processor( | 493 scoped_refptr<MediaStreamAudioProcessor> audio_processor( | 
| 422 new rtc::RefCountedObject<MediaStreamAudioProcessor>( | 494 new rtc::RefCountedObject<MediaStreamAudioProcessor>( | 
| 423 constraint_factory.CreateWebMediaConstraints(), 0, | 495 constraint_factory.CreateWebMediaConstraints(), input_device_params_, | 
| 424 webrtc_audio_device.get())); | 496 webrtc_audio_device.get())); | 
| 425 | 497 | 
| 426 EXPECT_TRUE(audio_processor->aec_dump_message_filter_.get()); | 498 EXPECT_TRUE(audio_processor->aec_dump_message_filter_.get()); | 
| 427 | 499 | 
| 428 audio_processor = NULL; | 500 audio_processor = NULL; | 
| 429 } | 501 } | 
| 430 | 502 | 
| 431 TEST_F(MediaStreamAudioProcessorTest, TestStereoAudio) { | 503 TEST_F(MediaStreamAudioProcessorTest, TestStereoAudio) { | 
| 432 // Set up the correct constraints to turn off the audio processing and turn | 504 // Set up the correct constraints to turn off the audio processing and turn | 
| 433 // on the stereo channels mirroring. | 505 // on the stereo channels mirroring. | 
| 434 MockMediaConstraintFactory constraint_factory; | 506 MockMediaConstraintFactory constraint_factory; | 
| 435 constraint_factory.AddMandatory(MediaAudioConstraints::kEchoCancellation, | 507 constraint_factory.AddMandatory(MediaAudioConstraints::kEchoCancellation, | 
| 436 false); | 508 false); | 
| 437 constraint_factory.AddMandatory(MediaAudioConstraints::kGoogAudioMirroring, | 509 constraint_factory.AddMandatory(MediaAudioConstraints::kGoogAudioMirroring, | 
| 438 true); | 510 true); | 
| 439 scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device( | 511 scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device( | 
| 440 new WebRtcAudioDeviceImpl()); | 512 new WebRtcAudioDeviceImpl()); | 
| 441 scoped_refptr<MediaStreamAudioProcessor> audio_processor( | 513 scoped_refptr<MediaStreamAudioProcessor> audio_processor( | 
| 442 new rtc::RefCountedObject<MediaStreamAudioProcessor>( | 514 new rtc::RefCountedObject<MediaStreamAudioProcessor>( | 
| 443 constraint_factory.CreateWebMediaConstraints(), 0, | 515 constraint_factory.CreateWebMediaConstraints(), input_device_params_, | 
| 444 webrtc_audio_device.get())); | 516 webrtc_audio_device.get())); | 
| 445 EXPECT_FALSE(audio_processor->has_audio_processing()); | 517 EXPECT_FALSE(audio_processor->has_audio_processing()); | 
| 446 const media::AudioParameters source_params( | 518 const media::AudioParameters source_params( | 
| 447 media::AudioParameters::AUDIO_PCM_LOW_LATENCY, | 519 media::AudioParameters::AUDIO_PCM_LOW_LATENCY, | 
| 448 media::CHANNEL_LAYOUT_STEREO, 48000, 16, 480); | 520 media::CHANNEL_LAYOUT_STEREO, 48000, 16, 480); | 
| 449 audio_processor->OnCaptureFormatChanged(source_params); | 521 audio_processor->OnCaptureFormatChanged(source_params); | 
| 450 EXPECT_EQ(audio_processor->OutputFormat().channels(), 2); | 522 EXPECT_EQ(audio_processor->OutputFormat().channels(), 2); | 
| 451 | 523 | 
| 452 // Construct left and right channels, and assign different values to the | 524 // Construct left and right channels, and assign different values to the | 
| 453 // first data of the left channel and right channel. | 525 // first data of the left channel and right channel. | 
| (...skipping 43 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 497 #endif | 569 #endif | 
| 498 | 570 | 
| 499 TEST_F(MediaStreamAudioProcessorTest, MAYBE_TestWithKeyboardMicChannel) { | 571 TEST_F(MediaStreamAudioProcessorTest, MAYBE_TestWithKeyboardMicChannel) { | 
| 500 MockMediaConstraintFactory constraint_factory; | 572 MockMediaConstraintFactory constraint_factory; | 
| 501 constraint_factory.AddMandatory( | 573 constraint_factory.AddMandatory( | 
| 502 MediaAudioConstraints::kGoogExperimentalNoiseSuppression, true); | 574 MediaAudioConstraints::kGoogExperimentalNoiseSuppression, true); | 
| 503 scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device( | 575 scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device( | 
| 504 new WebRtcAudioDeviceImpl()); | 576 new WebRtcAudioDeviceImpl()); | 
| 505 scoped_refptr<MediaStreamAudioProcessor> audio_processor( | 577 scoped_refptr<MediaStreamAudioProcessor> audio_processor( | 
| 506 new rtc::RefCountedObject<MediaStreamAudioProcessor>( | 578 new rtc::RefCountedObject<MediaStreamAudioProcessor>( | 
| 507 constraint_factory.CreateWebMediaConstraints(), 0, | 579 constraint_factory.CreateWebMediaConstraints(), input_device_params_, | 
| 508 webrtc_audio_device.get())); | 580 webrtc_audio_device.get())); | 
| 509 EXPECT_TRUE(audio_processor->has_audio_processing()); | 581 EXPECT_TRUE(audio_processor->has_audio_processing()); | 
| 510 | 582 | 
| 511 media::AudioParameters params(media::AudioParameters::AUDIO_PCM_LOW_LATENCY, | 583 media::AudioParameters params(media::AudioParameters::AUDIO_PCM_LOW_LATENCY, | 
| 512 media::CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC, | 584 media::CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC, | 
| 513 48000, 16, 512); | 585 48000, 16, 512); | 
| 514 audio_processor->OnCaptureFormatChanged(params); | 586 audio_processor->OnCaptureFormatChanged(params); | 
| 515 | 587 | 
| 516 ProcessDataAndVerifyFormat(audio_processor.get(), | 588 ProcessDataAndVerifyFormat(audio_processor.get(), | 
| 517 kAudioProcessingSampleRate, | 589 kAudioProcessingSampleRate, | 
| 518 kAudioProcessingNumberOfChannel, | 590 kAudioProcessingNumberOfChannel, | 
| 519 kAudioProcessingSampleRate / 100); | 591 kAudioProcessingSampleRate / 100); | 
| 520 // Set |audio_processor| to NULL to make sure |webrtc_audio_device| outlives | 592 // Set |audio_processor| to NULL to make sure |webrtc_audio_device| outlives | 
| 521 // |audio_processor|. | 593 // |audio_processor|. | 
| 522 audio_processor = NULL; | 594 audio_processor = NULL; | 
| 523 } | 595 } | 
| 524 | 596 | 
| 525 using Point = webrtc::Point; | |
| 526 using PointVector = std::vector<Point>; | |
| 527 | |
| 528 void ExpectPointVectorEqual(const PointVector& expected, | |
| 529 const PointVector& actual) { | |
| 530 EXPECT_EQ(expected.size(), actual.size()); | |
| 531 for (size_t i = 0; i < actual.size(); ++i) { | |
| 532 EXPECT_EQ(expected[i].x(), actual[i].x()); | |
| 533 EXPECT_EQ(expected[i].y(), actual[i].y()); | |
| 534 EXPECT_EQ(expected[i].z(), actual[i].z()); | |
| 535 } | |
| 536 } | |
| 537 | |
| 538 TEST(MediaStreamAudioProcessorOptionsTest, ParseArrayGeometry) { | |
| 539 const PointVector expected_empty; | |
| 540 ExpectPointVectorEqual(expected_empty, ParseArrayGeometry("")); | |
| 541 ExpectPointVectorEqual(expected_empty, ParseArrayGeometry("0 0 a")); | |
| 542 ExpectPointVectorEqual(expected_empty, ParseArrayGeometry("1 2")); | |
| 543 ExpectPointVectorEqual(expected_empty, ParseArrayGeometry("1 2 3 4")); | |
| 544 | |
| 545 { | |
| 546 PointVector expected(1, Point(-0.02f, 0, 0)); | |
| 547 expected.push_back(Point(0.02f, 0, 0)); | |
| 548 ExpectPointVectorEqual(expected, ParseArrayGeometry("-0.02 0 0 0.02 0 0")); | |
| 549 } | |
| 550 { | |
| 551 PointVector expected(1, Point(1, 2, 3)); | |
| 552 ExpectPointVectorEqual(expected, ParseArrayGeometry("1 2 3")); | |
| 553 } | |
| 554 } | |
| 555 | |
| 556 } // namespace content | 597 } // namespace content | 
| OLD | NEW |