| OLD | NEW |
| 1 // Copyright 2013 The Chromium Authors. All rights reserved. | 1 // Copyright 2013 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "base/command_line.h" | 5 #include "base/command_line.h" |
| 6 #include "base/file_util.h" | 6 #include "base/file_util.h" |
| 7 #include "base/files/file_path.h" | 7 #include "base/files/file_path.h" |
| 8 #include "base/logging.h" | 8 #include "base/logging.h" |
| 9 #include "base/memory/aligned_memory.h" | 9 #include "base/memory/aligned_memory.h" |
| 10 #include "base/path_service.h" | 10 #include "base/path_service.h" |
| (...skipping 144 matching lines...) | |
| 155 }; | 155 }; |
| 156 | 156 |
| 157 TEST_F(MediaStreamAudioProcessorTest, WithoutAudioProcessing) { | 157 TEST_F(MediaStreamAudioProcessorTest, WithoutAudioProcessing) { |
| 158 // Setup the audio processor with disabled flag on. | 158 // Setup the audio processor with disabled flag on. |
| 159 CommandLine::ForCurrentProcess()->AppendSwitch( | 159 CommandLine::ForCurrentProcess()->AppendSwitch( |
| 160 switches::kDisableAudioTrackProcessing); | 160 switches::kDisableAudioTrackProcessing); |
| 161 MockMediaConstraintFactory constraint_factory; | 161 MockMediaConstraintFactory constraint_factory; |
| 162 scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device( | 162 scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device( |
| 163 new WebRtcAudioDeviceImpl()); | 163 new WebRtcAudioDeviceImpl()); |
| 164 scoped_refptr<MediaStreamAudioProcessor> audio_processor( | 164 scoped_refptr<MediaStreamAudioProcessor> audio_processor( |
| 165 new talk_base::RefCountedObject<MediaStreamAudioProcessor>( | 165 new rtc::RefCountedObject<MediaStreamAudioProcessor>( |
| 166 constraint_factory.CreateWebMediaConstraints(), 0, | 166 constraint_factory.CreateWebMediaConstraints(), 0, |
| 167 webrtc_audio_device.get())); | 167 webrtc_audio_device.get())); |
| 168 EXPECT_FALSE(audio_processor->has_audio_processing()); | 168 EXPECT_FALSE(audio_processor->has_audio_processing()); |
| 169 audio_processor->OnCaptureFormatChanged(params_); | 169 audio_processor->OnCaptureFormatChanged(params_); |
| 170 | 170 |
| 171 ProcessDataAndVerifyFormat(audio_processor, | 171 ProcessDataAndVerifyFormat(audio_processor, |
| 172 params_.sample_rate(), | 172 params_.sample_rate(), |
| 173 params_.channels(), | 173 params_.channels(), |
| 174 params_.sample_rate() / 100); | 174 params_.sample_rate() / 100); |
| 175 // Set |audio_processor| to NULL to make sure |webrtc_audio_device| outlives | 175 // Set |audio_processor| to NULL to make sure |webrtc_audio_device| outlives |
| 176 // |audio_processor|. | 176 // |audio_processor|. |
| 177 audio_processor = NULL; | 177 audio_processor = NULL; |
| 178 } | 178 } |
| 179 | 179 |
| 180 TEST_F(MediaStreamAudioProcessorTest, WithAudioProcessing) { | 180 TEST_F(MediaStreamAudioProcessorTest, WithAudioProcessing) { |
| 181 MockMediaConstraintFactory constraint_factory; | 181 MockMediaConstraintFactory constraint_factory; |
| 182 scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device( | 182 scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device( |
| 183 new WebRtcAudioDeviceImpl()); | 183 new WebRtcAudioDeviceImpl()); |
| 184 scoped_refptr<MediaStreamAudioProcessor> audio_processor( | 184 scoped_refptr<MediaStreamAudioProcessor> audio_processor( |
| 185 new talk_base::RefCountedObject<MediaStreamAudioProcessor>( | 185 new rtc::RefCountedObject<MediaStreamAudioProcessor>( |
| 186 constraint_factory.CreateWebMediaConstraints(), 0, | 186 constraint_factory.CreateWebMediaConstraints(), 0, |
| 187 webrtc_audio_device.get())); | 187 webrtc_audio_device.get())); |
| 188 EXPECT_TRUE(audio_processor->has_audio_processing()); | 188 EXPECT_TRUE(audio_processor->has_audio_processing()); |
| 189 audio_processor->OnCaptureFormatChanged(params_); | 189 audio_processor->OnCaptureFormatChanged(params_); |
| 190 VerifyDefaultComponents(audio_processor); | 190 VerifyDefaultComponents(audio_processor); |
| 191 | 191 |
| 192 ProcessDataAndVerifyFormat(audio_processor, | 192 ProcessDataAndVerifyFormat(audio_processor, |
| 193 kAudioProcessingSampleRate, | 193 kAudioProcessingSampleRate, |
| 194 kAudioProcessingNumberOfChannel, | 194 kAudioProcessingNumberOfChannel, |
| 195 kAudioProcessingSampleRate / 100); | 195 kAudioProcessingSampleRate / 100); |
| 196 // Set |audio_processor| to NULL to make sure |webrtc_audio_device| outlives | 196 // Set |audio_processor| to NULL to make sure |webrtc_audio_device| outlives |
| 197 // |audio_processor|. | 197 // |audio_processor|. |
| 198 audio_processor = NULL; | 198 audio_processor = NULL; |
| 199 } | 199 } |
| 200 | 200 |
| 201 TEST_F(MediaStreamAudioProcessorTest, VerifyTabCaptureWithoutAudioProcessing) { | 201 TEST_F(MediaStreamAudioProcessorTest, VerifyTabCaptureWithoutAudioProcessing) { |
| 202 scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device( | 202 scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device( |
| 203 new WebRtcAudioDeviceImpl()); | 203 new WebRtcAudioDeviceImpl()); |
| 204 // Create MediaStreamAudioProcessor instance for kMediaStreamSourceTab source. | 204 // Create MediaStreamAudioProcessor instance for kMediaStreamSourceTab source. |
| 205 MockMediaConstraintFactory tab_constraint_factory; | 205 MockMediaConstraintFactory tab_constraint_factory; |
| 206 const std::string tab_string = kMediaStreamSourceTab; | 206 const std::string tab_string = kMediaStreamSourceTab; |
| 207 tab_constraint_factory.AddMandatory(kMediaStreamSource, | 207 tab_constraint_factory.AddMandatory(kMediaStreamSource, |
| 208 tab_string); | 208 tab_string); |
| 209 scoped_refptr<MediaStreamAudioProcessor> audio_processor( | 209 scoped_refptr<MediaStreamAudioProcessor> audio_processor( |
| 210 new talk_base::RefCountedObject<MediaStreamAudioProcessor>( | 210 new rtc::RefCountedObject<MediaStreamAudioProcessor>( |
| 211 tab_constraint_factory.CreateWebMediaConstraints(), 0, | 211 tab_constraint_factory.CreateWebMediaConstraints(), 0, |
| 212 webrtc_audio_device.get())); | 212 webrtc_audio_device.get())); |
| 213 EXPECT_FALSE(audio_processor->has_audio_processing()); | 213 EXPECT_FALSE(audio_processor->has_audio_processing()); |
| 214 audio_processor->OnCaptureFormatChanged(params_); | 214 audio_processor->OnCaptureFormatChanged(params_); |
| 215 | 215 |
| 216 ProcessDataAndVerifyFormat(audio_processor, | 216 ProcessDataAndVerifyFormat(audio_processor, |
| 217 params_.sample_rate(), | 217 params_.sample_rate(), |
| 218 params_.channels(), | 218 params_.channels(), |
| 219 params_.sample_rate() / 100); | 219 params_.sample_rate() / 100); |
| 220 | 220 |
| 221 // Create MediaStreamAudioProcessor instance for kMediaStreamSourceSystem | 221 // Create MediaStreamAudioProcessor instance for kMediaStreamSourceSystem |
| 222 // source. | 222 // source. |
| 223 MockMediaConstraintFactory system_constraint_factory; | 223 MockMediaConstraintFactory system_constraint_factory; |
| 224 const std::string system_string = kMediaStreamSourceSystem; | 224 const std::string system_string = kMediaStreamSourceSystem; |
| 225 system_constraint_factory.AddMandatory(kMediaStreamSource, | 225 system_constraint_factory.AddMandatory(kMediaStreamSource, |
| 226 system_string); | 226 system_string); |
| 227 audio_processor = new talk_base::RefCountedObject<MediaStreamAudioProcessor>( | 227 audio_processor = new rtc::RefCountedObject<MediaStreamAudioProcessor>( |
| 228 system_constraint_factory.CreateWebMediaConstraints(), 0, | 228 system_constraint_factory.CreateWebMediaConstraints(), 0, |
| 229 webrtc_audio_device.get()); | 229 webrtc_audio_device.get()); |
| 230 EXPECT_FALSE(audio_processor->has_audio_processing()); | 230 EXPECT_FALSE(audio_processor->has_audio_processing()); |
| 231 | 231 |
| 232 // Set |audio_processor| to NULL to make sure |webrtc_audio_device| outlives | 232 // Set |audio_processor| to NULL to make sure |webrtc_audio_device| outlives |
| 233 // |audio_processor|. | 233 // |audio_processor|. |
| 234 audio_processor = NULL; | 234 audio_processor = NULL; |
| 235 } | 235 } |
| 236 | 236 |
| 237 TEST_F(MediaStreamAudioProcessorTest, TurnOffDefaultConstraints) { | 237 TEST_F(MediaStreamAudioProcessorTest, TurnOffDefaultConstraints) { |
| 238 // Turn off the default constraints and pass it to MediaStreamAudioProcessor. | 238 // Turn off the default constraints and pass it to MediaStreamAudioProcessor. |
| 239 MockMediaConstraintFactory constraint_factory; | 239 MockMediaConstraintFactory constraint_factory; |
| 240 constraint_factory.DisableDefaultAudioConstraints(); | 240 constraint_factory.DisableDefaultAudioConstraints(); |
| 241 scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device( | 241 scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device( |
| 242 new WebRtcAudioDeviceImpl()); | 242 new WebRtcAudioDeviceImpl()); |
| 243 scoped_refptr<MediaStreamAudioProcessor> audio_processor( | 243 scoped_refptr<MediaStreamAudioProcessor> audio_processor( |
| 244 new talk_base::RefCountedObject<MediaStreamAudioProcessor>( | 244 new rtc::RefCountedObject<MediaStreamAudioProcessor>( |
| 245 constraint_factory.CreateWebMediaConstraints(), 0, | 245 constraint_factory.CreateWebMediaConstraints(), 0, |
| 246 webrtc_audio_device.get())); | 246 webrtc_audio_device.get())); |
| 247 EXPECT_FALSE(audio_processor->has_audio_processing()); | 247 EXPECT_FALSE(audio_processor->has_audio_processing()); |
| 248 audio_processor->OnCaptureFormatChanged(params_); | 248 audio_processor->OnCaptureFormatChanged(params_); |
| 249 | 249 |
| 250 ProcessDataAndVerifyFormat(audio_processor, | 250 ProcessDataAndVerifyFormat(audio_processor, |
| 251 params_.sample_rate(), | 251 params_.sample_rate(), |
| 252 params_.channels(), | 252 params_.channels(), |
| 253 params_.sample_rate() / 100); | 253 params_.sample_rate() / 100); |
| 254 // Set |audio_processor| to NULL to make sure |webrtc_audio_device| outlives | 254 // Set |audio_processor| to NULL to make sure |webrtc_audio_device| outlives |
| (...skipping 95 matching lines...) | |
| 350 MediaAudioConstraints audio_constraints( | 350 MediaAudioConstraints audio_constraints( |
| 351 constraint_factory.CreateWebMediaConstraints(), 0); | 351 constraint_factory.CreateWebMediaConstraints(), 0); |
| 352 EXPECT_FALSE(audio_constraints.IsValid()); | 352 EXPECT_FALSE(audio_constraints.IsValid()); |
| 353 } | 353 } |
| 354 | 354 |
| 355 TEST_F(MediaStreamAudioProcessorTest, TestAllSampleRates) { | 355 TEST_F(MediaStreamAudioProcessorTest, TestAllSampleRates) { |
| 356 MockMediaConstraintFactory constraint_factory; | 356 MockMediaConstraintFactory constraint_factory; |
| 357 scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device( | 357 scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device( |
| 358 new WebRtcAudioDeviceImpl()); | 358 new WebRtcAudioDeviceImpl()); |
| 359 scoped_refptr<MediaStreamAudioProcessor> audio_processor( | 359 scoped_refptr<MediaStreamAudioProcessor> audio_processor( |
| 360 new talk_base::RefCountedObject<MediaStreamAudioProcessor>( | 360 new rtc::RefCountedObject<MediaStreamAudioProcessor>( |
| 361 constraint_factory.CreateWebMediaConstraints(), 0, | 361 constraint_factory.CreateWebMediaConstraints(), 0, |
| 362 webrtc_audio_device.get())); | 362 webrtc_audio_device.get())); |
| 363 EXPECT_TRUE(audio_processor->has_audio_processing()); | 363 EXPECT_TRUE(audio_processor->has_audio_processing()); |
| 364 | 364 |
| 365 static const int kSupportedSampleRates[] = | 365 static const int kSupportedSampleRates[] = |
| 366 { 8000, 16000, 22050, 32000, 44100, 48000, 88200, 96000 }; | 366 { 8000, 16000, 22050, 32000, 44100, 48000, 88200, 96000 }; |
| 367 for (size_t i = 0; i < arraysize(kSupportedSampleRates); ++i) { | 367 for (size_t i = 0; i < arraysize(kSupportedSampleRates); ++i) { |
| 368 int buffer_size = (kSupportedSampleRates[i] / 100) < 128 ? | 368 int buffer_size = (kSupportedSampleRates[i] / 100) < 128 ? |
| 369 kSupportedSampleRates[i] / 100 : 128; | 369 kSupportedSampleRates[i] / 100 : 128; |
| 370 media::AudioParameters params( | 370 media::AudioParameters params( |
| (...skipping 20 matching lines...) | |
| 391 TEST_F(MediaStreamAudioProcessorTest, GetAecDumpMessageFilter) { | 391 TEST_F(MediaStreamAudioProcessorTest, GetAecDumpMessageFilter) { |
| 392 base::MessageLoopForUI message_loop; | 392 base::MessageLoopForUI message_loop; |
| 393 scoped_refptr<AecDumpMessageFilter> aec_dump_message_filter_( | 393 scoped_refptr<AecDumpMessageFilter> aec_dump_message_filter_( |
| 394 new AecDumpMessageFilter(message_loop.message_loop_proxy(), | 394 new AecDumpMessageFilter(message_loop.message_loop_proxy(), |
| 395 message_loop.message_loop_proxy())); | 395 message_loop.message_loop_proxy())); |
| 396 | 396 |
| 397 MockMediaConstraintFactory constraint_factory; | 397 MockMediaConstraintFactory constraint_factory; |
| 398 scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device( | 398 scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device( |
| 399 new WebRtcAudioDeviceImpl()); | 399 new WebRtcAudioDeviceImpl()); |
| 400 scoped_refptr<MediaStreamAudioProcessor> audio_processor( | 400 scoped_refptr<MediaStreamAudioProcessor> audio_processor( |
| 401 new talk_base::RefCountedObject<MediaStreamAudioProcessor>( | 401 new rtc::RefCountedObject<MediaStreamAudioProcessor>( |
| 402 constraint_factory.CreateWebMediaConstraints(), 0, | 402 constraint_factory.CreateWebMediaConstraints(), 0, |
| 403 webrtc_audio_device.get())); | 403 webrtc_audio_device.get())); |
| 404 | 404 |
| 405 EXPECT_TRUE(audio_processor->aec_dump_message_filter_); | 405 EXPECT_TRUE(audio_processor->aec_dump_message_filter_); |
| 406 | 406 |
| 407 audio_processor = NULL; | 407 audio_processor = NULL; |
| 408 } | 408 } |
| 409 | 409 |
| 410 TEST_F(MediaStreamAudioProcessorTest, TestStereoAudio) { | 410 TEST_F(MediaStreamAudioProcessorTest, TestStereoAudio) { |
| 411 // Set up the correct constraints to turn off the audio processing and turn | 411 // Set up the correct constraints to turn off the audio processing and turn |
| 412 // on the stereo channels mirroring. | 412 // on the stereo channels mirroring. |
| 413 MockMediaConstraintFactory constraint_factory; | 413 MockMediaConstraintFactory constraint_factory; |
| 414 constraint_factory.AddMandatory(MediaAudioConstraints::kEchoCancellation, | 414 constraint_factory.AddMandatory(MediaAudioConstraints::kEchoCancellation, |
| 415 false); | 415 false); |
| 416 constraint_factory.AddMandatory(MediaAudioConstraints::kGoogAudioMirroring, | 416 constraint_factory.AddMandatory(MediaAudioConstraints::kGoogAudioMirroring, |
| 417 true); | 417 true); |
| 418 scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device( | 418 scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device( |
| 419 new WebRtcAudioDeviceImpl()); | 419 new WebRtcAudioDeviceImpl()); |
| 420 scoped_refptr<MediaStreamAudioProcessor> audio_processor( | 420 scoped_refptr<MediaStreamAudioProcessor> audio_processor( |
| 421 new talk_base::RefCountedObject<MediaStreamAudioProcessor>( | 421 new rtc::RefCountedObject<MediaStreamAudioProcessor>( |
| 422 constraint_factory.CreateWebMediaConstraints(), 0, | 422 constraint_factory.CreateWebMediaConstraints(), 0, |
| 423 webrtc_audio_device.get())); | 423 webrtc_audio_device.get())); |
| 424 EXPECT_FALSE(audio_processor->has_audio_processing()); | 424 EXPECT_FALSE(audio_processor->has_audio_processing()); |
| 425 const media::AudioParameters source_params( | 425 const media::AudioParameters source_params( |
| 426 media::AudioParameters::AUDIO_PCM_LOW_LATENCY, | 426 media::AudioParameters::AUDIO_PCM_LOW_LATENCY, |
| 427 media::CHANNEL_LAYOUT_STEREO, 48000, 16, 480); | 427 media::CHANNEL_LAYOUT_STEREO, 48000, 16, 480); |
| 428 audio_processor->OnCaptureFormatChanged(source_params); | 428 audio_processor->OnCaptureFormatChanged(source_params); |
| 429 EXPECT_EQ(audio_processor->OutputFormat().channels(), 2); | 429 EXPECT_EQ(audio_processor->OutputFormat().channels(), 2); |
| 430 | 430 |
| 431 // Construct left and right channels, and assign different values to the | 431 // Construct left and right channels, and assign different values to the |
| (...skipping 31 matching lines...) | |
| 463 EXPECT_EQ(output_bus->channel(0)[0], 0); | 463 EXPECT_EQ(output_bus->channel(0)[0], 0); |
| 464 EXPECT_NE(output_bus->channel(1)[0], 0); | 464 EXPECT_NE(output_bus->channel(1)[0], 0); |
| 465 } | 465 } |
| 466 | 466 |
| 467 // Set |audio_processor| to NULL to make sure |webrtc_audio_device| outlives | 467 // Set |audio_processor| to NULL to make sure |webrtc_audio_device| outlives |
| 468 // |audio_processor|. | 468 // |audio_processor|. |
| 469 audio_processor = NULL; | 469 audio_processor = NULL; |
| 470 } | 470 } |
| 471 | 471 |
| 472 } // namespace content | 472 } // namespace content |
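Every hunk above applies the same mechanical rename: `talk_base::RefCountedObject` becomes `rtc::RefCountedObject` wherever the test wraps `MediaStreamAudioProcessor` in a ref-counted object. A minimal sketch of the post-rename construction pattern, assuming the same content/renderer/media test headers and constructor arguments used in this unittest (constraints, an effects value of 0, and the raw `WebRtcAudioDeviceImpl` pointer):

```cpp
// Sketch only; types come from the Chromium headers included by this test.
scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
    new WebRtcAudioDeviceImpl());
MockMediaConstraintFactory constraint_factory;
scoped_refptr<MediaStreamAudioProcessor> audio_processor(
    new rtc::RefCountedObject<MediaStreamAudioProcessor>(  // was talk_base::
        constraint_factory.CreateWebMediaConstraints(), 0,
        webrtc_audio_device.get()));
// Release the processor first so |webrtc_audio_device| outlives it, as the
// tests above do before returning.
audio_processor = NULL;
```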