| OLD | NEW |
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include <vector> | 5 #include <vector> |
| 6 | 6 |
| 7 #include "base/environment.h" | 7 #include "base/environment.h" |
| 8 #include "base/file_util.h" | 8 #include "base/file_util.h" |
| 9 #include "base/files/file_path.h" | 9 #include "base/files/file_path.h" |
| 10 #include "base/path_service.h" | 10 #include "base/path_service.h" |
| (...skipping 231 matching lines...) | |
| 242 | 242 |
| 243 class MockWebRtcAudioRendererSource : public WebRtcAudioRendererSource { | 243 class MockWebRtcAudioRendererSource : public WebRtcAudioRendererSource { |
| 244 public: | 244 public: |
| 245 explicit MockWebRtcAudioRendererSource(base::WaitableEvent* event) | 245 explicit MockWebRtcAudioRendererSource(base::WaitableEvent* event) |
| 246 : event_(event) { | 246 : event_(event) { |
| 247 DCHECK(event_); | 247 DCHECK(event_); |
| 248 } | 248 } |
| 249 virtual ~MockWebRtcAudioRendererSource() {} | 249 virtual ~MockWebRtcAudioRendererSource() {} |
| 250 | 250 |
| 251 // WebRtcAudioRendererSource implementation. | 251 // WebRtcAudioRendererSource implementation. |
| 252 virtual void RenderData(uint8* audio_data, | 252 virtual void RenderData(media::AudioBus* audio_bus, |
| 253 int number_of_channels, | 253 int sample_rate, |
| 254 int number_of_frames, | |
| 255 int audio_delay_milliseconds) OVERRIDE { | 254 int audio_delay_milliseconds) OVERRIDE { |
| 256 // Signal that a callback has been received. | 255 // Signal that a callback has been received. |
| 257 // Initialize the memory to zero to avoid an uninitialized warning from | 256 // Initialize the memory to zero to avoid an uninitialized warning from |
| 258 // Valgrind. | 257 // Valgrind. |
| 259 memset(audio_data, 0, | 258 audio_bus->Zero(); |
| 260 sizeof(int16) * number_of_channels * number_of_frames); | |
| 261 event_->Signal(); | 259 event_->Signal(); |
| 262 } | 260 } |
| 263 | 261 |
| 264 virtual void SetRenderFormat(const media::AudioParameters& params) OVERRIDE { | |
| 265 } | |
| 266 | |
| 267 virtual void RemoveAudioRenderer(WebRtcAudioRenderer* renderer) OVERRIDE {}; | 262 virtual void RemoveAudioRenderer(WebRtcAudioRenderer* renderer) OVERRIDE {}; |
| 268 | 263 |
| 269 private: | 264 private: |
| 270 base::WaitableEvent* event_; | 265 base::WaitableEvent* event_; |
| 271 | 266 |
| 272 DISALLOW_COPY_AND_ASSIGN(MockWebRtcAudioRendererSource); | 267 DISALLOW_COPY_AND_ASSIGN(MockWebRtcAudioRendererSource); |
| 273 }; | 268 }; |
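
The mock above captures the heart of this patch: `RenderData()` now receives a `media::AudioBus*` plus the sample rate instead of a raw interleaved `uint8*` buffer with explicit channel and frame counts, so the old `memset` byte arithmetic collapses into a single `Zero()` call. A minimal sketch of the difference, assuming Chromium's standard `media::AudioBus` API (`Zero()`, `channels()`, `frames()`); it is an illustration, not code from the patch:

```cpp
// Illustration only. int16_t/uint8_t stand in for Chromium's int16/uint8.
#include <stdint.h>
#include <string.h>

#include "media/base/audio_bus.h"  // header location assumed

// Old contract: a raw interleaved buffer, so the callee must know the sample
// format and do the byte arithmetic itself.
void ZeroRawRenderBuffer(uint8_t* audio_data, int channels, int frames) {
  memset(audio_data, 0, sizeof(int16_t) * channels * frames);
}

// New contract: the AudioBus carries its own channel/frame geometry, so
// zeroing (or any per-channel processing) needs no extra bookkeeping.
void ZeroRenderBus(media::AudioBus* audio_bus) {
  audio_bus->Zero();
}
```
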
| 274 | 269 |
| 275 // Prints numerical information to stdout in a controlled format so we can plot | 270 // Prints numerical information to stdout in a controlled format so we can plot |
| 276 // the result. | 271 // the result. |
| (...skipping 47 matching lines...) | |
| 324 bool enable_apm) { | 319 bool enable_apm) { |
| 325 scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device( | 320 scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device( |
| 326 new WebRtcAudioDeviceImpl()); | 321 new WebRtcAudioDeviceImpl()); |
| 327 WebRTCAutoDelete<webrtc::VoiceEngine> engine(webrtc::VoiceEngine::Create()); | 322 WebRTCAutoDelete<webrtc::VoiceEngine> engine(webrtc::VoiceEngine::Create()); |
| 328 EXPECT_TRUE(engine.valid()); | 323 EXPECT_TRUE(engine.valid()); |
| 329 ScopedWebRTCPtr<webrtc::VoEBase> base(engine.get()); | 324 ScopedWebRTCPtr<webrtc::VoEBase> base(engine.get()); |
| 330 EXPECT_TRUE(base.valid()); | 325 EXPECT_TRUE(base.valid()); |
| 331 int err = base->Init(webrtc_audio_device.get()); | 326 int err = base->Init(webrtc_audio_device.get()); |
| 332 EXPECT_EQ(0, err); | 327 EXPECT_EQ(0, err); |
| 333 | 328 |
| 334 // We use OnSetFormat() and SetRenderFormat() to configure the audio | 329 // We use OnSetFormat() to configure the audio parameters so that this |
| 335 // parameters so that this test can run on a machine without a hardware device. | 330 // test can run on a machine without a hardware device. |
| 336 const media::AudioParameters params = media::AudioParameters( | 331 const media::AudioParameters params = media::AudioParameters( |
| 337 media::AudioParameters::AUDIO_PCM_LOW_LATENCY, CHANNEL_LAYOUT_STEREO, | 332 media::AudioParameters::AUDIO_PCM_LOW_LATENCY, CHANNEL_LAYOUT_STEREO, |
| 338 48000, 2, 480); | 333 48000, 2, 480); |
| 339 PeerConnectionAudioSink* capturer_sink = | 334 PeerConnectionAudioSink* capturer_sink = |
| 340 static_cast<PeerConnectionAudioSink*>(webrtc_audio_device.get()); | 335 static_cast<PeerConnectionAudioSink*>(webrtc_audio_device.get()); |
| 341 WebRtcAudioRendererSource* renderer_source = | 336 WebRtcAudioRendererSource* renderer_source = |
| 342 static_cast<WebRtcAudioRendererSource*>(webrtc_audio_device.get()); | 337 static_cast<WebRtcAudioRendererSource*>(webrtc_audio_device.get()); |
| 343 renderer_source->SetRenderFormat(params); | |
| 344 | 338 |
| 345 // Turn on/off all the signal processing components like AGC, AEC and NS. | 339 // Turn on/off all the signal processing components like AGC, AEC and NS. |
| 346 ScopedWebRTCPtr<webrtc::VoEAudioProcessing> audio_processing(engine.get()); | 340 ScopedWebRTCPtr<webrtc::VoEAudioProcessing> audio_processing(engine.get()); |
| 347 EXPECT_TRUE(audio_processing.valid()); | 341 EXPECT_TRUE(audio_processing.valid()); |
| 348 audio_processing->SetAgcStatus(enable_apm); | 342 audio_processing->SetAgcStatus(enable_apm); |
| 349 audio_processing->SetNsStatus(enable_apm); | 343 audio_processing->SetNsStatus(enable_apm); |
| 350 audio_processing->SetEcStatus(enable_apm); | 344 audio_processing->SetEcStatus(enable_apm); |
| 351 | 345 |
| 352 // Create a voice channel for WebRTC. | 346 // Create a voice channel for WebRTC. |
| 353 int channel = base->CreateChannel(); | 347 int channel = base->CreateChannel(); |
| 354 EXPECT_NE(-1, channel); | 348 EXPECT_NE(-1, channel); |
| 355 SetChannelCodec(engine.get(), channel); | 349 SetChannelCodec(engine.get(), channel); |
| 356 | 350 |
| 357 // Use our fake network transmission and start playout and recording. | 351 // Use our fake network transmission and start playout and recording. |
| 358 ScopedWebRTCPtr<webrtc::VoENetwork> network(engine.get()); | 352 ScopedWebRTCPtr<webrtc::VoENetwork> network(engine.get()); |
| 359 EXPECT_TRUE(network.valid()); | 353 EXPECT_TRUE(network.valid()); |
| 360 scoped_ptr<WebRTCTransportImpl> transport( | 354 scoped_ptr<WebRTCTransportImpl> transport( |
| 361 new WebRTCTransportImpl(network.get())); | 355 new WebRTCTransportImpl(network.get())); |
| 362 EXPECT_EQ(0, network->RegisterExternalTransport(channel, *transport.get())); | 356 EXPECT_EQ(0, network->RegisterExternalTransport(channel, *transport.get())); |
| 363 EXPECT_EQ(0, base->StartPlayout(channel)); | 357 EXPECT_EQ(0, base->StartPlayout(channel)); |
| 364 EXPECT_EQ(0, base->StartSend(channel)); | 358 EXPECT_EQ(0, base->StartSend(channel)); |
| 365 | 359 |
| 366 // Read speech data from a speech test file. | 360 // Read speech data from a speech test file. |
| 367 const int input_packet_size = | 361 const int input_packet_size = |
| 368 params.frames_per_buffer() * 2 * params.channels(); | 362 params.frames_per_buffer() * 2 * params.channels(); |
| 369 const int num_output_channels = webrtc_audio_device->output_channels(); | |
| 370 const int output_packet_size = webrtc_audio_device->output_buffer_size() * 2 * | |
| 371 num_output_channels; | |
| 372 const size_t length = input_packet_size * kNumberOfPacketsForLoopbackTest; | 363 const size_t length = input_packet_size * kNumberOfPacketsForLoopbackTest; |
| 373 scoped_ptr<char[]> capture_data(new char[length]); | 364 scoped_ptr<char[]> capture_data(new char[length]); |
| 374 ReadDataFromSpeechFile(capture_data.get(), length); | 365 ReadDataFromSpeechFile(capture_data.get(), length); |
| 375 | 366 |
| 376 // Start the timer. | 367 // Start the timer. |
| 377 scoped_ptr<uint8[]> buffer(new uint8[output_packet_size]); | 368 scoped_ptr<media::AudioBus> render_audio_bus(media::AudioBus::Create(params)); |
| 378 base::Time start_time = base::Time::Now(); | 369 base::Time start_time = base::Time::Now(); |
| 379 int delay = 0; | 370 int delay = 0; |
| 380 std::vector<int> voe_channels; | 371 std::vector<int> voe_channels; |
| 381 voe_channels.push_back(channel); | 372 voe_channels.push_back(channel); |
| 382 for (int j = 0; j < kNumberOfPacketsForLoopbackTest; ++j) { | 373 for (int j = 0; j < kNumberOfPacketsForLoopbackTest; ++j) { |
| 383 // Sending fake capture data to WebRtc. | 374 // Sending fake capture data to WebRtc. |
| 384 capturer_sink->OnData( | 375 capturer_sink->OnData( |
| 385 reinterpret_cast<int16*>(capture_data.get() + input_packet_size * j), | 376 reinterpret_cast<int16*>(capture_data.get() + input_packet_size * j), |
| 386 params.sample_rate(), | 377 params.sample_rate(), |
| 387 params.channels(), | 378 params.channels(), |
| 388 params.frames_per_buffer(), | 379 params.frames_per_buffer(), |
| 389 voe_channels, | 380 voe_channels, |
| 390 kHardwareLatencyInMs, | 381 kHardwareLatencyInMs, |
| 391 1.0, | 382 1.0, |
| 392 enable_apm, | 383 enable_apm, |
| 393 false); | 384 false); |
| 394 | 385 |
| 395 // Receiving data from WebRtc. | 386 // Receiving data from WebRtc. |
| 396 renderer_source->RenderData( | 387 renderer_source->RenderData( |
| 397 reinterpret_cast<uint8*>(buffer.get()), | 388 render_audio_bus.get(), params.sample_rate(), |
| 398 num_output_channels, webrtc_audio_device->output_buffer_size(), | |
| 399 kHardwareLatencyInMs + delay); | 389 kHardwareLatencyInMs + delay); |
| 400 delay = (base::Time::Now() - start_time).InMilliseconds(); | 390 delay = (base::Time::Now() - start_time).InMilliseconds(); |
| 401 } | 391 } |
| 402 | 392 |
| 403 int latency = (base::Time::Now() - start_time).InMilliseconds(); | 393 int latency = (base::Time::Now() - start_time).InMilliseconds(); |
| 404 | 394 |
| 405 EXPECT_EQ(0, base->StopSend(channel)); | 395 EXPECT_EQ(0, base->StopSend(channel)); |
| 406 EXPECT_EQ(0, base->StopPlayout(channel)); | 396 EXPECT_EQ(0, base->StopPlayout(channel)); |
| 407 EXPECT_EQ(0, base->DeleteChannel(channel)); | 397 EXPECT_EQ(0, base->DeleteChannel(channel)); |
| 408 EXPECT_EQ(0, base->Terminate()); | 398 EXPECT_EQ(0, base->Terminate()); |
| (...skipping 569 matching lines...) | |
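
Note that the hunk above also drops the hand-computed `output_packet_size` (old lines 369-371): once the render buffer is a `media::AudioBus` created from the test's `AudioParameters`, the channel count and frames-per-packet travel with the bus. A small sketch of that idea, assuming `media::AudioBus::Create(const media::AudioParameters&)` as used in the test; the names here are illustrative only:

```cpp
// Sketch only: an AudioBus built from AudioParameters replaces the manual
// "frames * bytes_per_sample * channels" bookkeeping on the render path.
#include "base/memory/scoped_ptr.h"
#include "media/audio/audio_parameters.h"  // header location assumed
#include "media/base/audio_bus.h"

void SketchRenderBuffer(const media::AudioParameters& params) {
  scoped_ptr<media::AudioBus> bus(media::AudioBus::Create(params));
  // Geometry comes from the bus itself; no separate output_packet_size needed.
  const int channels = bus->channels();           // == params.channels()
  const int frames_per_packet = bus->frames();    // == params.frames_per_buffer()
  (void)channels;
  (void)frames_per_packet;
}
```
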
| 978 LOG(WARNING) << "Test disabled because it hangs on WinXP."; | 968 LOG(WARNING) << "Test disabled because it hangs on WinXP."; |
| 979 return; | 969 return; |
| 980 } | 970 } |
| 981 #endif | 971 #endif |
| 982 int latency = RunWebRtcLoopbackTimeTest(audio_manager_.get(), true); | 972 int latency = RunWebRtcLoopbackTimeTest(audio_manager_.get(), true); |
| 983 PrintPerfResultMs("webrtc_loopback_with_signal_processing (100 packets)", | 973 PrintPerfResultMs("webrtc_loopback_with_signal_processing (100 packets)", |
| 984 "t", latency); | 974 "t", latency); |
| 985 } | 975 } |
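
PrintPerfResultMs() itself is collapsed earlier in this diff, so its body is not shown here. Purely as a hypothetical illustration of the pattern the earlier comment describes ("a controlled format so we can plot the result"), and not the elided code, such a helper usually just prints one machine-parseable line per measurement:

```cpp
// Hypothetical sketch, not the collapsed PrintPerfResultMs() from this file.
// Chromium perf bots conventionally parse lines of the form
//   RESULT <graph>: <trace>= <value> <units>
#include <stdio.h>

static void PrintPerfResultMsSketch(const char* graph, const char* trace,
                                    double time_ms) {
  printf("RESULT %s: %s= %.2f ms\n", graph, trace, time_ms);
}
```

The call site above passes a graph name ("webrtc_loopback_with_signal_processing (100 packets)"), a trace name ("t"), and the measured latency in milliseconds.
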
| 986 | 976 |
| 987 } // namespace content | 977 } // namespace content |