OLD | NEW |
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include <vector> | 5 #include <vector> |
6 | 6 |
7 #include "base/environment.h" | 7 #include "base/environment.h" |
8 #include "base/file_util.h" | 8 #include "base/file_util.h" |
9 #include "base/files/file_path.h" | 9 #include "base/files/file_path.h" |
10 #include "base/path_service.h" | 10 #include "base/path_service.h" |
(...skipping 85 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
96 int output_sample_rate = hardware_config->GetOutputSampleRate(); | 96 int output_sample_rate = hardware_config->GetOutputSampleRate(); |
97 if (!FindElementInArray(valid_output_rates, arraysize(valid_output_rates), | 97 if (!FindElementInArray(valid_output_rates, arraysize(valid_output_rates), |
98 output_sample_rate)) { | 98 output_sample_rate)) { |
99 LOG(WARNING) << "Non-supported output sample rate detected."; | 99 LOG(WARNING) << "Non-supported output sample rate detected."; |
100 return false; | 100 return false; |
101 } | 101 } |
102 | 102 |
103 return true; | 103 return true; |
104 } | 104 } |
105 | 105 |
106 // Utility method which creates and initializes the audio capturer and adds it | 106 // Utility method which creates the audio capturer; it returns a scoped |
107 // to WebRTC audio device. This method should be used in tests where | 107 // reference of the capturer if it is created successfully, otherwise it returns |
| 108 // NULL. This method should be used in tests where |
108 // HardwareSampleRatesAreValid() has been called and returned true. | 109 // HardwareSampleRatesAreValid() has been called and returned true. |
109 bool CreateAndInitializeCapturer(WebRtcAudioDeviceImpl* webrtc_audio_device) { | 110 scoped_refptr<WebRtcAudioCapturer> CreateAudioCapturer( |
110 DCHECK(webrtc_audio_device); | 111 WebRtcAudioDeviceImpl* webrtc_audio_device) { |
111 scoped_refptr<WebRtcAudioCapturer> capturer( | |
112 WebRtcAudioCapturer::CreateCapturer()); | |
113 | |
114 media::AudioHardwareConfig* hardware_config = | 112 media::AudioHardwareConfig* hardware_config = |
115 RenderThreadImpl::current()->GetAudioHardwareConfig(); | 113 RenderThreadImpl::current()->GetAudioHardwareConfig(); |
116 | |
117 // Use native capture sample rate and channel configuration to get some | 114 // Use native capture sample rate and channel configuration to get some |
118 // action in this test. | 115 // action in this test. |
119 int sample_rate = hardware_config->GetInputSampleRate(); | 116 int sample_rate = hardware_config->GetInputSampleRate(); |
120 media::ChannelLayout channel_layout = | 117 media::ChannelLayout channel_layout = |
121 hardware_config->GetInputChannelLayout(); | 118 hardware_config->GetInputChannelLayout(); |
122 if (!capturer->Initialize(kRenderViewId, channel_layout, sample_rate, 0, 1, | |
123 media::AudioManagerBase::kDefaultDeviceId, 0, 0, | |
124 media::AudioParameters::NO_EFFECTS)) { | |
125 return false; | |
126 } | |
127 | 119 |
128 // Add the capturer to the WebRtcAudioDeviceImpl. | 120 StreamDeviceInfo device(MEDIA_DEVICE_AUDIO_CAPTURE, |
129 webrtc_audio_device->AddAudioCapturer(capturer); | 121 media::AudioManagerBase::kDefaultDeviceName, |
130 | 122 media::AudioManagerBase::kDefaultDeviceId, |
131 return true; | 123 sample_rate, channel_layout, 0); |
| 124 device.session_id = 1; |
| 125 return WebRtcAudioCapturer::CreateCapturer(kRenderViewId, device, |
| 126 webrtc_audio_device); |
132 } | 127 } |
133 | 128 |
134 // Create and start a local audio track. Starting the audio track will connect | 129 // Create and start a local audio track. Starting the audio track will connect |
135 // the audio track to the capturer and also start the source of the capturer. | 130 // the audio track to the capturer and also start the source of the capturer. |
136 // Also, connect the sink to the audio track. | 131 // Also, connect the sink to the audio track. |
137 scoped_refptr<WebRtcLocalAudioTrack> | 132 scoped_refptr<WebRtcLocalAudioTrack> |
138 CreateAndStartLocalAudioTrack(WebRtcAudioCapturer* capturer, | 133 CreateAndStartLocalAudioTrack(WebRtcAudioCapturer* capturer, |
139 PeerConnectionAudioSink* sink) { | 134 PeerConnectionAudioSink* sink) { |
140 scoped_refptr<WebRtcLocalAudioTrack> local_audio_track( | 135 scoped_refptr<WebRtcLocalAudioTrack> local_audio_track( |
141 WebRtcLocalAudioTrack::Create(std::string(), capturer, NULL, NULL, NULL)); | 136 WebRtcLocalAudioTrack::Create(std::string(), capturer, NULL, NULL, NULL)); |
(...skipping 339 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
481 SetAudioHardwareConfig(&audio_config); | 476 SetAudioHardwareConfig(&audio_config); |
482 | 477 |
483 scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device( | 478 scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device( |
484 new WebRtcAudioDeviceImpl()); | 479 new WebRtcAudioDeviceImpl()); |
485 | 480 |
486 WebRTCAutoDelete<webrtc::VoiceEngine> engine(webrtc::VoiceEngine::Create()); | 481 WebRTCAutoDelete<webrtc::VoiceEngine> engine(webrtc::VoiceEngine::Create()); |
487 ASSERT_TRUE(engine.valid()); | 482 ASSERT_TRUE(engine.valid()); |
488 | 483 |
489 ScopedWebRTCPtr<webrtc::VoEBase> base(engine.get()); | 484 ScopedWebRTCPtr<webrtc::VoEBase> base(engine.get()); |
490 int err = base->Init(webrtc_audio_device.get()); | 485 int err = base->Init(webrtc_audio_device.get()); |
491 EXPECT_TRUE(CreateAndInitializeCapturer(webrtc_audio_device.get())); | 486 EXPECT_TRUE(CreateAudioCapturer(webrtc_audio_device) != NULL); |
492 EXPECT_EQ(0, err); | 487 EXPECT_EQ(0, err); |
493 EXPECT_EQ(0, base->Terminate()); | 488 EXPECT_EQ(0, base->Terminate()); |
494 } | 489 } |
495 | 490 |
496 // Verify that a call to webrtc::VoEBase::StartPlayout() starts audio output | 491 // Verify that a call to webrtc::VoEBase::StartPlayout() starts audio output |
497 // with the correct set of parameters. A WebRtcAudioDeviceImpl instance will | 492 // with the correct set of parameters. A WebRtcAudioDeviceImpl instance will |
498 // be utilized to implement the actual audio path. The test registers a | 493 // be utilized to implement the actual audio path. The test registers a |
499 // webrtc::VoEExternalMedia implementation to hijack the output audio and | 494 // webrtc::VoEExternalMedia implementation to hijack the output audio and |
500 // verify that streaming starts correctly. | 495 // verify that streaming starts correctly. |
501 // TODO(henrika): include on Android as well as soon as all race conditions | 496 // TODO(henrika): include on Android as well as soon as all race conditions |
(...skipping 128 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
630 | 625 |
631 // We must add an external transport implementation to be able to start | 626 // We must add an external transport implementation to be able to start |
632 // recording without actually sending encoded packets to the network. All | 627 // recording without actually sending encoded packets to the network. All |
633 // we want to do here is to verify that audio capturing starts as it should. | 628 // we want to do here is to verify that audio capturing starts as it should. |
634 ScopedWebRTCPtr<webrtc::VoENetwork> network(engine.get()); | 629 ScopedWebRTCPtr<webrtc::VoENetwork> network(engine.get()); |
635 scoped_ptr<WebRTCTransportImpl> transport( | 630 scoped_ptr<WebRTCTransportImpl> transport( |
636 new WebRTCTransportImpl(network.get())); | 631 new WebRTCTransportImpl(network.get())); |
637 EXPECT_EQ(0, network->RegisterExternalTransport(ch, *transport.get())); | 632 EXPECT_EQ(0, network->RegisterExternalTransport(ch, *transport.get())); |
638 EXPECT_EQ(0, base->StartSend(ch)); | 633 EXPECT_EQ(0, base->StartSend(ch)); |
639 | 634 |
640 // Create and initialize the capturer which starts the source of the data | 635 // Create the capturer which starts the source of the data flow. |
641 // flow. | 636 scoped_refptr<WebRtcAudioCapturer> capturer( |
642 EXPECT_TRUE(CreateAndInitializeCapturer(webrtc_audio_device.get())); | 637 CreateAudioCapturer(webrtc_audio_device)); |
| 638 EXPECT_TRUE(capturer); |
643 | 639 |
644 // Create and start a local audio track which is bridging the data flow | 640 // Create and start a local audio track which is bridging the data flow |
645 // between the capturer and WebRtcAudioDeviceImpl. | 641 // between the capturer and WebRtcAudioDeviceImpl. |
646 scoped_refptr<WebRtcLocalAudioTrack> local_audio_track( | 642 scoped_refptr<WebRtcLocalAudioTrack> local_audio_track( |
647 CreateAndStartLocalAudioTrack(webrtc_audio_device->GetDefaultCapturer(), | 643 CreateAndStartLocalAudioTrack(capturer, webrtc_audio_device)); |
648 webrtc_audio_device)); | |
649 // connect the VoE voice channel to the audio track | 644 // connect the VoE voice channel to the audio track |
650 static_cast<webrtc::AudioTrackInterface*>(local_audio_track.get())-> | 645 static_cast<webrtc::AudioTrackInterface*>(local_audio_track.get())-> |
651 GetRenderer()->AddChannel(ch); | 646 GetRenderer()->AddChannel(ch); |
652 | 647 |
653 // Verify we get the data flow. | 648 // Verify we get the data flow. |
654 EXPECT_TRUE(event.TimedWait(TestTimeouts::action_timeout())); | 649 EXPECT_TRUE(event.TimedWait(TestTimeouts::action_timeout())); |
655 WaitForIOThreadCompletion(); | 650 WaitForIOThreadCompletion(); |
656 | 651 |
657 EXPECT_FALSE(webrtc_audio_device->Playing()); | 652 EXPECT_FALSE(webrtc_audio_device->Playing()); |
658 EXPECT_TRUE(webrtc_audio_device->Recording()); | 653 EXPECT_TRUE(webrtc_audio_device->Recording()); |
659 EXPECT_EQ(ch, media_process->channel_id()); | 654 EXPECT_EQ(ch, media_process->channel_id()); |
660 EXPECT_EQ(webrtc::kRecordingPerChannel, media_process->type()); | 655 EXPECT_EQ(webrtc::kRecordingPerChannel, media_process->type()); |
661 EXPECT_EQ(80, media_process->packet_size()); | 656 EXPECT_EQ(80, media_process->packet_size()); |
662 EXPECT_EQ(8000, media_process->sample_rate()); | 657 EXPECT_EQ(8000, media_process->sample_rate()); |
663 | 658 |
664 EXPECT_EQ(0, external_media->DeRegisterExternalMediaProcessing( | 659 EXPECT_EQ(0, external_media->DeRegisterExternalMediaProcessing( |
665 ch, webrtc::kRecordingPerChannel)); | 660 ch, webrtc::kRecordingPerChannel)); |
666 EXPECT_EQ(0, base->StopSend(ch)); | 661 EXPECT_EQ(0, base->StopSend(ch)); |
667 | 662 |
668 webrtc_audio_device->GetDefaultCapturer()->Stop(); | 663 capturer->Stop(); |
669 EXPECT_EQ(0, base->DeleteChannel(ch)); | 664 EXPECT_EQ(0, base->DeleteChannel(ch)); |
670 EXPECT_EQ(0, base->Terminate()); | 665 EXPECT_EQ(0, base->Terminate()); |
671 } | 666 } |
672 | 667 |
673 // Uses WebRtcAudioDeviceImpl to play a local wave file. | 668 // Uses WebRtcAudioDeviceImpl to play a local wave file. |
674 // TODO(henrika): include on Android as well as soon as all race conditions | 669 // TODO(henrika): include on Android as well as soon as all race conditions |
675 // in OpenSLES are resolved. | 670 // in OpenSLES are resolved. |
676 #if defined(OS_ANDROID) | 671 #if defined(OS_ANDROID) |
677 #define MAYBE_PlayLocalFile DISABLED_PlayLocalFile | 672 #define MAYBE_PlayLocalFile DISABLED_PlayLocalFile |
678 #else | 673 #else |
(...skipping 108 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
787 bool enabled = false; | 782 bool enabled = false; |
788 webrtc::AgcModes agc_mode = webrtc::kAgcDefault; | 783 webrtc::AgcModes agc_mode = webrtc::kAgcDefault; |
789 EXPECT_EQ(0, audio_processing->GetAgcStatus(enabled, agc_mode)); | 784 EXPECT_EQ(0, audio_processing->GetAgcStatus(enabled, agc_mode)); |
790 EXPECT_TRUE(enabled); | 785 EXPECT_TRUE(enabled); |
791 EXPECT_EQ(agc_mode, webrtc::kAgcAdaptiveAnalog); | 786 EXPECT_EQ(agc_mode, webrtc::kAgcAdaptiveAnalog); |
792 #endif | 787 #endif |
793 | 788 |
794 int ch = base->CreateChannel(); | 789 int ch = base->CreateChannel(); |
795 EXPECT_NE(-1, ch); | 790 EXPECT_NE(-1, ch); |
796 | 791 |
797 EXPECT_TRUE(CreateAndInitializeCapturer(webrtc_audio_device.get())); | 792 scoped_refptr<WebRtcAudioCapturer> capturer( |
| 793 CreateAudioCapturer(webrtc_audio_device)); |
| 794 EXPECT_TRUE(capturer); |
798 scoped_refptr<WebRtcLocalAudioTrack> local_audio_track( | 795 scoped_refptr<WebRtcLocalAudioTrack> local_audio_track( |
799 CreateAndStartLocalAudioTrack(webrtc_audio_device->GetDefaultCapturer(), | 796 CreateAndStartLocalAudioTrack(capturer, webrtc_audio_device)); |
800 webrtc_audio_device)); | |
801 // connect the VoE voice channel to the audio track | 797 // connect the VoE voice channel to the audio track |
802 static_cast<webrtc::AudioTrackInterface*>(local_audio_track.get())-> | 798 static_cast<webrtc::AudioTrackInterface*>(local_audio_track.get())-> |
803 GetRenderer()->AddChannel(ch); | 799 GetRenderer()->AddChannel(ch); |
804 | 800 |
805 ScopedWebRTCPtr<webrtc::VoENetwork> network(engine.get()); | 801 ScopedWebRTCPtr<webrtc::VoENetwork> network(engine.get()); |
806 ASSERT_TRUE(network.valid()); | 802 ASSERT_TRUE(network.valid()); |
807 scoped_ptr<WebRTCTransportImpl> transport( | 803 scoped_ptr<WebRTCTransportImpl> transport( |
808 new WebRTCTransportImpl(network.get())); | 804 new WebRTCTransportImpl(network.get())); |
809 EXPECT_EQ(0, network->RegisterExternalTransport(ch, *transport.get())); | 805 EXPECT_EQ(0, network->RegisterExternalTransport(ch, *transport.get())); |
810 EXPECT_EQ(0, base->StartPlayout(ch)); | 806 EXPECT_EQ(0, base->StartPlayout(ch)); |
811 EXPECT_EQ(0, base->StartSend(ch)); | 807 EXPECT_EQ(0, base->StartSend(ch)); |
812 scoped_refptr<WebRtcAudioRenderer> renderer( | 808 scoped_refptr<WebRtcAudioRenderer> renderer( |
813 CreateDefaultWebRtcAudioRenderer(kRenderViewId)); | 809 CreateDefaultWebRtcAudioRenderer(kRenderViewId)); |
814 scoped_refptr<MediaStreamAudioRenderer> proxy( | 810 scoped_refptr<MediaStreamAudioRenderer> proxy( |
815 renderer->CreateSharedAudioRendererProxy()); | 811 renderer->CreateSharedAudioRendererProxy()); |
816 EXPECT_TRUE(webrtc_audio_device->SetAudioRenderer(renderer.get())); | 812 EXPECT_TRUE(webrtc_audio_device->SetAudioRenderer(renderer.get())); |
817 proxy->Start(); | 813 proxy->Start(); |
818 proxy->Play(); | 814 proxy->Play(); |
819 | 815 |
820 VLOG(0) << ">> You should now be able to hear yourself in loopback..."; | 816 VLOG(0) << ">> You should now be able to hear yourself in loopback..."; |
821 message_loop_.PostDelayedTask(FROM_HERE, | 817 message_loop_.PostDelayedTask(FROM_HERE, |
822 base::MessageLoop::QuitClosure(), | 818 base::MessageLoop::QuitClosure(), |
823 base::TimeDelta::FromSeconds(2)); | 819 base::TimeDelta::FromSeconds(2)); |
824 message_loop_.Run(); | 820 message_loop_.Run(); |
825 | 821 |
826 webrtc_audio_device->GetDefaultCapturer()->Stop(); | 822 capturer->Stop(); |
827 proxy->Stop(); | 823 proxy->Stop(); |
828 EXPECT_EQ(0, base->StopSend(ch)); | 824 EXPECT_EQ(0, base->StopSend(ch)); |
829 EXPECT_EQ(0, base->StopPlayout(ch)); | 825 EXPECT_EQ(0, base->StopPlayout(ch)); |
830 | 826 |
831 EXPECT_EQ(0, base->DeleteChannel(ch)); | 827 EXPECT_EQ(0, base->DeleteChannel(ch)); |
832 EXPECT_EQ(0, base->Terminate()); | 828 EXPECT_EQ(0, base->Terminate()); |
833 } | 829 } |
834 | 830 |
835 // Test times out on bots, see http://crbug.com/247447 | 831 // Test times out on bots, see http://crbug.com/247447 |
836 TEST_F(MAYBE_WebRTCAudioDeviceTest, DISABLED_WebRtcRecordingSetupTime) { | 832 TEST_F(MAYBE_WebRTCAudioDeviceTest, DISABLED_WebRtcRecordingSetupTime) { |
(...skipping 16 matching lines...) Expand all Loading... |
853 ASSERT_TRUE(engine.valid()); | 849 ASSERT_TRUE(engine.valid()); |
854 | 850 |
855 ScopedWebRTCPtr<webrtc::VoEBase> base(engine.get()); | 851 ScopedWebRTCPtr<webrtc::VoEBase> base(engine.get()); |
856 ASSERT_TRUE(base.valid()); | 852 ASSERT_TRUE(base.valid()); |
857 int err = base->Init(webrtc_audio_device.get()); | 853 int err = base->Init(webrtc_audio_device.get()); |
858 ASSERT_EQ(0, err); | 854 ASSERT_EQ(0, err); |
859 | 855 |
860 int ch = base->CreateChannel(); | 856 int ch = base->CreateChannel(); |
861 EXPECT_NE(-1, ch); | 857 EXPECT_NE(-1, ch); |
862 | 858 |
863 EXPECT_TRUE(CreateAndInitializeCapturer(webrtc_audio_device.get())); | 859 scoped_refptr<WebRtcAudioCapturer> capturer( |
| 860 CreateAudioCapturer(webrtc_audio_device)); |
| 861 EXPECT_TRUE(capturer); |
864 base::WaitableEvent event(false, false); | 862 base::WaitableEvent event(false, false); |
865 scoped_ptr<MockMediaStreamAudioSink> sink( | 863 scoped_ptr<MockMediaStreamAudioSink> sink( |
866 new MockMediaStreamAudioSink(&event)); | 864 new MockMediaStreamAudioSink(&event)); |
867 | 865 |
868 // Create and start a local audio track. Starting the audio track will connect | 866 // Create and start a local audio track. Starting the audio track will connect |
869 // the audio track to the capturer and also start the source of the capturer. | 867 // the audio track to the capturer and also start the source of the capturer. |
870 scoped_refptr<WebRtcLocalAudioTrack> local_audio_track( | 868 scoped_refptr<WebRtcLocalAudioTrack> local_audio_track( |
871 CreateAndStartLocalAudioTrack( | 869 CreateAndStartLocalAudioTrack(capturer, sink.get())); |
872 webrtc_audio_device->GetDefaultCapturer().get(), sink.get())); | |
873 | 870 |
874 // connect the VoE voice channel to the audio track. | 871 // connect the VoE voice channel to the audio track. |
875 static_cast<webrtc::AudioTrackInterface*>(local_audio_track.get())-> | 872 static_cast<webrtc::AudioTrackInterface*>(local_audio_track.get())-> |
876 GetRenderer()->AddChannel(ch); | 873 GetRenderer()->AddChannel(ch); |
877 | 874 |
878 base::Time start_time = base::Time::Now(); | 875 base::Time start_time = base::Time::Now(); |
879 EXPECT_EQ(0, base->StartSend(ch)); | 876 EXPECT_EQ(0, base->StartSend(ch)); |
880 | 877 |
881 EXPECT_TRUE(event.TimedWait(TestTimeouts::action_timeout())); | 878 EXPECT_TRUE(event.TimedWait(TestTimeouts::action_timeout())); |
882 int delay = (base::Time::Now() - start_time).InMilliseconds(); | 879 int delay = (base::Time::Now() - start_time).InMilliseconds(); |
883 PrintPerfResultMs("webrtc_recording_setup_c", "t", delay); | 880 PrintPerfResultMs("webrtc_recording_setup_c", "t", delay); |
884 | 881 |
885 webrtc_audio_device->GetDefaultCapturer()->Stop(); | 882 capturer->Stop(); |
886 EXPECT_EQ(0, base->StopSend(ch)); | 883 EXPECT_EQ(0, base->StopSend(ch)); |
887 EXPECT_EQ(0, base->DeleteChannel(ch)); | 884 EXPECT_EQ(0, base->DeleteChannel(ch)); |
888 EXPECT_EQ(0, base->Terminate()); | 885 EXPECT_EQ(0, base->Terminate()); |
889 } | 886 } |
890 | 887 |
891 | 888 |
892 // TODO(henrika): include on Android as well as soon as all race conditions | 889 // TODO(henrika): include on Android as well as soon as all race conditions |
893 // in OpenSLES are resolved. | 890 // in OpenSLES are resolved. |
894 #if defined(OS_ANDROID) | 891 #if defined(OS_ANDROID) |
895 #define MAYBE_WebRtcPlayoutSetupTime DISABLED_WebRtcPlayoutSetupTime | 892 #define MAYBE_WebRtcPlayoutSetupTime DISABLED_WebRtcPlayoutSetupTime |
(...skipping 74 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
970 LOG(WARNING) << "Test disabled due to the test hangs on WinXP."; | 967 LOG(WARNING) << "Test disabled due to the test hangs on WinXP."; |
971 return; | 968 return; |
972 } | 969 } |
973 #endif | 970 #endif |
974 int latency = RunWebRtcLoopbackTimeTest(audio_manager_.get(), true); | 971 int latency = RunWebRtcLoopbackTimeTest(audio_manager_.get(), true); |
975 PrintPerfResultMs("webrtc_loopback_with_signal_processing (100 packets)", | 972 PrintPerfResultMs("webrtc_loopback_with_signal_processing (100 packets)", |
976 "t", latency); | 973 "t", latency); |
977 } | 974 } |
978 | 975 |
979 } // namespace content | 976 } // namespace content |
OLD | NEW |