OLD | NEW |
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include <vector> | 5 #include <vector> |
6 | 6 |
7 #include "base/environment.h" | 7 #include "base/environment.h" |
8 #include "base/file_util.h" | 8 #include "base/file_util.h" |
9 #include "base/files/file_path.h" | 9 #include "base/files/file_path.h" |
10 #include "base/path_service.h" | 10 #include "base/path_service.h" |
(...skipping 228 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
239 | 239 |
240 class MockWebRtcAudioRendererSource : public WebRtcAudioRendererSource { | 240 class MockWebRtcAudioRendererSource : public WebRtcAudioRendererSource { |
241 public: | 241 public: |
242 explicit MockWebRtcAudioRendererSource(base::WaitableEvent* event) | 242 explicit MockWebRtcAudioRendererSource(base::WaitableEvent* event) |
243 : event_(event) { | 243 : event_(event) { |
244 DCHECK(event_); | 244 DCHECK(event_); |
245 } | 245 } |
246 virtual ~MockWebRtcAudioRendererSource() {} | 246 virtual ~MockWebRtcAudioRendererSource() {} |
247 | 247 |
248 // WebRtcAudioRendererSource implementation. | 248 // WebRtcAudioRendererSource implementation. |
249 virtual void RenderData(uint8* audio_data, | 249 virtual void RenderData(media::AudioBus* audio_bus, |
250 int number_of_channels, | 250 int sample_rate, |
251 int number_of_frames, | |
252 int audio_delay_milliseconds) OVERRIDE { | 251 int audio_delay_milliseconds) OVERRIDE { |
253 // Signal that a callback has been received. | 252 // Signal that a callback has been received. |
254 // Initialize the memory to zero to avoid uninitialized warning from | 253 // Initialize the memory to zero to avoid uninitialized warning from |
255 // Valgrind. | 254 // Valgrind. |
256 memset(audio_data, 0, | 255 audio_bus->Zero(); |
257 sizeof(int16) * number_of_channels * number_of_frames); | |
258 event_->Signal(); | 256 event_->Signal(); |
259 } | 257 } |
260 | 258 |
261 virtual void SetRenderFormat(const media::AudioParameters& params) OVERRIDE { | |
262 } | |
263 | |
264 virtual void RemoveAudioRenderer(WebRtcAudioRenderer* renderer) OVERRIDE {}; | 259 virtual void RemoveAudioRenderer(WebRtcAudioRenderer* renderer) OVERRIDE {}; |
265 | 260 |
266 private: | 261 private: |
267 base::WaitableEvent* event_; | 262 base::WaitableEvent* event_; |
268 | 263 |
269 DISALLOW_COPY_AND_ASSIGN(MockWebRtcAudioRendererSource); | 264 DISALLOW_COPY_AND_ASSIGN(MockWebRtcAudioRendererSource); |
270 }; | 265 }; |
271 | 266 |
272 // Prints numerical information to stdout in a controlled format so we can plot | 267 // Prints numerical information to stdout in a controlled format so we can plot |
273 // the result. | 268 // the result. |
(...skipping 47 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
321 bool enable_apm) { | 316 bool enable_apm) { |
322 scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device( | 317 scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device( |
323 new WebRtcAudioDeviceImpl()); | 318 new WebRtcAudioDeviceImpl()); |
324 WebRTCAutoDelete<webrtc::VoiceEngine> engine(webrtc::VoiceEngine::Create()); | 319 WebRTCAutoDelete<webrtc::VoiceEngine> engine(webrtc::VoiceEngine::Create()); |
325 EXPECT_TRUE(engine.valid()); | 320 EXPECT_TRUE(engine.valid()); |
326 ScopedWebRTCPtr<webrtc::VoEBase> base(engine.get()); | 321 ScopedWebRTCPtr<webrtc::VoEBase> base(engine.get()); |
327 EXPECT_TRUE(base.valid()); | 322 EXPECT_TRUE(base.valid()); |
328 int err = base->Init(webrtc_audio_device.get()); | 323 int err = base->Init(webrtc_audio_device.get()); |
329 EXPECT_EQ(0, err); | 324 EXPECT_EQ(0, err); |
330 | 325 |
331 // We use OnSetFormat() and SetRenderFormat() to configure the audio | 326 // We use OnSetFormat() to configure the audio parameters so that this |
332 // parameters so that this test can run on a machine without a hardware device. | 327 // test can run on a machine without a hardware device. |
333 const media::AudioParameters params = media::AudioParameters( | 328 const media::AudioParameters params = media::AudioParameters( |
334 media::AudioParameters::AUDIO_PCM_LOW_LATENCY, CHANNEL_LAYOUT_STEREO, | 329 media::AudioParameters::AUDIO_PCM_LOW_LATENCY, CHANNEL_LAYOUT_STEREO, |
335 48000, 2, 480); | 330 48000, 2, 480); |
336 PeerConnectionAudioSink* capturer_sink = | 331 PeerConnectionAudioSink* capturer_sink = |
337 static_cast<PeerConnectionAudioSink*>(webrtc_audio_device.get()); | 332 static_cast<PeerConnectionAudioSink*>(webrtc_audio_device.get()); |
338 WebRtcAudioRendererSource* renderer_source = | 333 WebRtcAudioRendererSource* renderer_source = |
339 static_cast<WebRtcAudioRendererSource*>(webrtc_audio_device.get()); | 334 static_cast<WebRtcAudioRendererSource*>(webrtc_audio_device.get()); |
340 renderer_source->SetRenderFormat(params); | |
341 | 335 |
342 // Turn on/off all the signal processing components like AGC, AEC and NS. | 336 // Turn on/off all the signal processing components like AGC, AEC and NS. |
343 ScopedWebRTCPtr<webrtc::VoEAudioProcessing> audio_processing(engine.get()); | 337 ScopedWebRTCPtr<webrtc::VoEAudioProcessing> audio_processing(engine.get()); |
344 EXPECT_TRUE(audio_processing.valid()); | 338 EXPECT_TRUE(audio_processing.valid()); |
345 audio_processing->SetAgcStatus(enable_apm); | 339 audio_processing->SetAgcStatus(enable_apm); |
346 audio_processing->SetNsStatus(enable_apm); | 340 audio_processing->SetNsStatus(enable_apm); |
347 audio_processing->SetEcStatus(enable_apm); | 341 audio_processing->SetEcStatus(enable_apm); |
348 | 342 |
349 // Create a voice channel for the WebRtc. | 343 // Create a voice channel for the WebRtc. |
350 int channel = base->CreateChannel(); | 344 int channel = base->CreateChannel(); |
351 EXPECT_NE(-1, channel); | 345 EXPECT_NE(-1, channel); |
352 SetChannelCodec(engine.get(), channel); | 346 SetChannelCodec(engine.get(), channel); |
353 | 347 |
354 // Use our fake network transmission and start playout and recording. | 348 // Use our fake network transmission and start playout and recording. |
355 ScopedWebRTCPtr<webrtc::VoENetwork> network(engine.get()); | 349 ScopedWebRTCPtr<webrtc::VoENetwork> network(engine.get()); |
356 EXPECT_TRUE(network.valid()); | 350 EXPECT_TRUE(network.valid()); |
357 scoped_ptr<WebRTCTransportImpl> transport( | 351 scoped_ptr<WebRTCTransportImpl> transport( |
358 new WebRTCTransportImpl(network.get())); | 352 new WebRTCTransportImpl(network.get())); |
359 EXPECT_EQ(0, network->RegisterExternalTransport(channel, *transport.get())); | 353 EXPECT_EQ(0, network->RegisterExternalTransport(channel, *transport.get())); |
360 EXPECT_EQ(0, base->StartPlayout(channel)); | 354 EXPECT_EQ(0, base->StartPlayout(channel)); |
361 EXPECT_EQ(0, base->StartSend(channel)); | 355 EXPECT_EQ(0, base->StartSend(channel)); |
362 | 356 |
363 // Read speech data from a speech test file. | 357 // Read speech data from a speech test file. |
364 const int input_packet_size = | 358 const int input_packet_size = |
365 params.frames_per_buffer() * 2 * params.channels(); | 359 params.frames_per_buffer() * 2 * params.channels(); |
366 const int num_output_channels = webrtc_audio_device->output_channels(); | |
367 const int output_packet_size = webrtc_audio_device->output_buffer_size() * 2 * | |
368 num_output_channels; | |
369 const size_t length = input_packet_size * kNumberOfPacketsForLoopbackTest; | 360 const size_t length = input_packet_size * kNumberOfPacketsForLoopbackTest; |
370 scoped_ptr<char[]> capture_data(new char[length]); | 361 scoped_ptr<char[]> capture_data(new char[length]); |
371 ReadDataFromSpeechFile(capture_data.get(), length); | 362 ReadDataFromSpeechFile(capture_data.get(), length); |
372 | 363 |
373 // Start the timer. | 364 // Start the timer. |
374 scoped_ptr<uint8[]> buffer(new uint8[output_packet_size]); | 365 scoped_ptr<media::AudioBus> render_audio_bus(media::AudioBus::Create(params)); |
375 base::Time start_time = base::Time::Now(); | 366 base::Time start_time = base::Time::Now(); |
376 int delay = 0; | 367 int delay = 0; |
377 std::vector<int> voe_channels; | 368 std::vector<int> voe_channels; |
378 voe_channels.push_back(channel); | 369 voe_channels.push_back(channel); |
379 for (int j = 0; j < kNumberOfPacketsForLoopbackTest; ++j) { | 370 for (int j = 0; j < kNumberOfPacketsForLoopbackTest; ++j) { |
380 // Sending fake capture data to WebRtc. | 371 // Sending fake capture data to WebRtc. |
381 capturer_sink->OnData( | 372 capturer_sink->OnData( |
382 reinterpret_cast<int16*>(capture_data.get() + input_packet_size * j), | 373 reinterpret_cast<int16*>(capture_data.get() + input_packet_size * j), |
383 params.sample_rate(), | 374 params.sample_rate(), |
384 params.channels(), | 375 params.channels(), |
385 params.frames_per_buffer(), | 376 params.frames_per_buffer(), |
386 voe_channels, | 377 voe_channels, |
387 kHardwareLatencyInMs, | 378 kHardwareLatencyInMs, |
388 1.0, | 379 1.0, |
389 enable_apm, | 380 enable_apm, |
390 false); | 381 false); |
391 | 382 |
392 // Receiving data from WebRtc. | 383 // Receiving data from WebRtc. |
393 renderer_source->RenderData( | 384 renderer_source->RenderData( |
394 reinterpret_cast<uint8*>(buffer.get()), | 385 render_audio_bus.get(), params.sample_rate(), |
395 num_output_channels, webrtc_audio_device->output_buffer_size(), | |
396 kHardwareLatencyInMs + delay); | 386 kHardwareLatencyInMs + delay); |
397 delay = (base::Time::Now() - start_time).InMilliseconds(); | 387 delay = (base::Time::Now() - start_time).InMilliseconds(); |
398 } | 388 } |
399 | 389 |
400 int latency = (base::Time::Now() - start_time).InMilliseconds(); | 390 int latency = (base::Time::Now() - start_time).InMilliseconds(); |
401 | 391 |
402 EXPECT_EQ(0, base->StopSend(channel)); | 392 EXPECT_EQ(0, base->StopSend(channel)); |
403 EXPECT_EQ(0, base->StopPlayout(channel)); | 393 EXPECT_EQ(0, base->StopPlayout(channel)); |
404 EXPECT_EQ(0, base->DeleteChannel(channel)); | 394 EXPECT_EQ(0, base->DeleteChannel(channel)); |
405 EXPECT_EQ(0, base->Terminate()); | 395 EXPECT_EQ(0, base->Terminate()); |
(...skipping 563 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
969 LOG(WARNING) << "Test disabled due to the test hangs on WinXP."; | 959 LOG(WARNING) << "Test disabled due to the test hangs on WinXP."; |
970 return; | 960 return; |
971 } | 961 } |
972 #endif | 962 #endif |
973 int latency = RunWebRtcLoopbackTimeTest(audio_manager_.get(), true); | 963 int latency = RunWebRtcLoopbackTimeTest(audio_manager_.get(), true); |
974 PrintPerfResultMs("webrtc_loopback_with_signal_processing (100 packets)", | 964 PrintPerfResultMs("webrtc_loopback_with_signal_processing (100 packets)", |
975 "t", latency); | 965 "t", latency); |
976 } | 966 } |
977 | 967 |
978 } // namespace content | 968 } // namespace content |
OLD | NEW |