Index: content/renderer/media/webrtc_audio_device_unittest.cc
diff --git a/content/renderer/media/webrtc_audio_device_unittest.cc b/content/renderer/media/webrtc_audio_device_unittest.cc
index d6821f6fba0e52c4a576fcfb502ab1af5ecfe56c..d1ccb329e0101e8e2a8cdcd2d413b063f8fc3052 100644
--- a/content/renderer/media/webrtc_audio_device_unittest.cc
+++ b/content/renderer/media/webrtc_audio_device_unittest.cc
@@ -103,32 +103,27 @@ bool HardwareSampleRatesAreValid() {
   return true;
 }
-// Utility method which creates and initializes the audio capturer and adds it
-// to WebRTC audio device. This method should be used in tests where
+// Utility method which creates the audio capturer. It returns a scoped
+// reference to the capturer if it is created successfully; otherwise it
+// returns NULL. This method should be used in tests where
 // HardwareSampleRatesAreValid() has been called and returned true.
-bool CreateAndInitializeCapturer(WebRtcAudioDeviceImpl* webrtc_audio_device) {
-  DCHECK(webrtc_audio_device);
-  scoped_refptr<WebRtcAudioCapturer> capturer(
-      WebRtcAudioCapturer::CreateCapturer());
-
+scoped_refptr<WebRtcAudioCapturer> CreateAudioCapturer(
+    WebRtcAudioDeviceImpl* webrtc_audio_device) {
   media::AudioHardwareConfig* hardware_config =
       RenderThreadImpl::current()->GetAudioHardwareConfig();
-
   // Use native capture sample rate and channel configuration to get some
   // action in this test.
   int sample_rate = hardware_config->GetInputSampleRate();
   media::ChannelLayout channel_layout =
       hardware_config->GetInputChannelLayout();
-  if (!capturer->Initialize(kRenderViewId, channel_layout, sample_rate, 0, 1,
-                            media::AudioManagerBase::kDefaultDeviceId, 0, 0,
-                            media::AudioParameters::NO_EFFECTS)) {
-    return false;
-  }
-  // Add the capturer to the WebRtcAudioDeviceImpl.
-  webrtc_audio_device->AddAudioCapturer(capturer);
-
-  return true;
+  StreamDeviceInfo device(MEDIA_DEVICE_AUDIO_CAPTURE,
+                          media::AudioManagerBase::kDefaultDeviceName,
+                          media::AudioManagerBase::kDefaultDeviceId,
+                          sample_rate, channel_layout, 0);
+  device.session_id = 1;
+  return WebRtcAudioCapturer::CreateCapturer(kRenderViewId, device,
+                                             webrtc_audio_device);
 }
 // Create and start a local audio track. Starting the audio track will connect
@@ -488,7 +483,7 @@ TEST_F(MAYBE_WebRTCAudioDeviceTest, Construct) {
   ScopedWebRTCPtr<webrtc::VoEBase> base(engine.get());
   int err = base->Init(webrtc_audio_device.get());
-  EXPECT_TRUE(CreateAndInitializeCapturer(webrtc_audio_device.get()));
+  EXPECT_TRUE(CreateAudioCapturer(webrtc_audio_device) != NULL);
   EXPECT_EQ(0, err);
   EXPECT_EQ(0, base->Terminate());
 }
@@ -637,15 +632,15 @@ TEST_F(MAYBE_WebRTCAudioDeviceTest, MAYBE_StartRecording) {
   EXPECT_EQ(0, network->RegisterExternalTransport(ch, *transport.get()));
   EXPECT_EQ(0, base->StartSend(ch));
-  // Create and initialize the capturer which starts the source of the data
-  // flow.
-  EXPECT_TRUE(CreateAndInitializeCapturer(webrtc_audio_device.get()));
+  // Create the capturer which starts the source of the data flow.
+  scoped_refptr<WebRtcAudioCapturer> capturer(
+      CreateAudioCapturer(webrtc_audio_device));
+  EXPECT_TRUE(capturer);
   // Create and start a local audio track which is bridging the data flow
   // between the capturer and WebRtcAudioDeviceImpl.
   scoped_refptr<WebRtcLocalAudioTrack> local_audio_track(
-      CreateAndStartLocalAudioTrack(webrtc_audio_device->GetDefaultCapturer(),
-                                    webrtc_audio_device));
+      CreateAndStartLocalAudioTrack(capturer, webrtc_audio_device));
   // connect the VoE voice channel to the audio track
   static_cast<webrtc::AudioTrackInterface*>(local_audio_track.get())->
       GetRenderer()->AddChannel(ch);
@@ -665,7 +660,7 @@ TEST_F(MAYBE_WebRTCAudioDeviceTest, MAYBE_StartRecording) {
       ch, webrtc::kRecordingPerChannel));
   EXPECT_EQ(0, base->StopSend(ch));
-  webrtc_audio_device->GetDefaultCapturer()->Stop();
+  capturer->Stop();
   EXPECT_EQ(0, base->DeleteChannel(ch));
   EXPECT_EQ(0, base->Terminate());
 }
@@ -794,10 +789,11 @@ TEST_F(MAYBE_WebRTCAudioDeviceTest, MAYBE_FullDuplexAudioWithAGC) {
   int ch = base->CreateChannel();
   EXPECT_NE(-1, ch);
-  EXPECT_TRUE(CreateAndInitializeCapturer(webrtc_audio_device.get()));
+  scoped_refptr<WebRtcAudioCapturer> capturer(
+      CreateAudioCapturer(webrtc_audio_device));
+  EXPECT_TRUE(capturer);
   scoped_refptr<WebRtcLocalAudioTrack> local_audio_track(
-      CreateAndStartLocalAudioTrack(webrtc_audio_device->GetDefaultCapturer(),
-                                    webrtc_audio_device));
+      CreateAndStartLocalAudioTrack(capturer, webrtc_audio_device));
   // connect the VoE voice channel to the audio track
   static_cast<webrtc::AudioTrackInterface*>(local_audio_track.get())->
       GetRenderer()->AddChannel(ch);
@@ -823,7 +819,7 @@ TEST_F(MAYBE_WebRTCAudioDeviceTest, MAYBE_FullDuplexAudioWithAGC) {
       base::TimeDelta::FromSeconds(2));
   message_loop_.Run();
-  webrtc_audio_device->GetDefaultCapturer()->Stop();
+  capturer->Stop();
   proxy->Stop();
   EXPECT_EQ(0, base->StopSend(ch));
   EXPECT_EQ(0, base->StopPlayout(ch));
@@ -860,7 +856,9 @@ TEST_F(MAYBE_WebRTCAudioDeviceTest, DISABLED_WebRtcRecordingSetupTime) {
   int ch = base->CreateChannel();
   EXPECT_NE(-1, ch);
-  EXPECT_TRUE(CreateAndInitializeCapturer(webrtc_audio_device.get()));
+  scoped_refptr<WebRtcAudioCapturer> capturer(
+      CreateAudioCapturer(webrtc_audio_device));
+  EXPECT_TRUE(capturer);
   base::WaitableEvent event(false, false);
   scoped_ptr<MockMediaStreamAudioSink> sink(
       new MockMediaStreamAudioSink(&event));
@@ -868,8 +866,7 @@ TEST_F(MAYBE_WebRTCAudioDeviceTest, DISABLED_WebRtcRecordingSetupTime) {
   // Create and start a local audio track. Starting the audio track will connect
   // the audio track to the capturer and also start the source of the capturer.
   scoped_refptr<WebRtcLocalAudioTrack> local_audio_track(
-      CreateAndStartLocalAudioTrack(
-          webrtc_audio_device->GetDefaultCapturer().get(), sink.get()));
+      CreateAndStartLocalAudioTrack(capturer, sink.get()));
   // connect the VoE voice channel to the audio track.
   static_cast<webrtc::AudioTrackInterface*>(local_audio_track.get())->
@@ -882,7 +879,7 @@ TEST_F(MAYBE_WebRTCAudioDeviceTest, DISABLED_WebRtcRecordingSetupTime) {
   int delay = (base::Time::Now() - start_time).InMilliseconds();
   PrintPerfResultMs("webrtc_recording_setup_c", "t", delay);
-  webrtc_audio_device->GetDefaultCapturer()->Stop();
+  capturer->Stop();
  EXPECT_EQ(0, base->StopSend(ch));
   EXPECT_EQ(0, base->DeleteChannel(ch));
   EXPECT_EQ(0, base->Terminate());
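
For reference, the recording-oriented tests (MAYBE_StartRecording, MAYBE_FullDuplexAudioWithAGC, DISABLED_WebRtcRecordingSetupTime) now share the capturer setup and teardown pattern sketched below. This is a condensed illustration assembled from the hunks above, not additional test code; ch, webrtc_audio_device, kRenderViewId and CreateAndStartLocalAudioTrack are assumed to be set up elsewhere in webrtc_audio_device_unittest.cc, exactly as in the existing tests.

  // Create the capturer; a NULL scoped_refptr now signals failure where the
  // old CreateAndInitializeCapturer() returned false.
  scoped_refptr<WebRtcAudioCapturer> capturer(
      CreateAudioCapturer(webrtc_audio_device));
  EXPECT_TRUE(capturer);

  // Bridge the capturer to WebRtcAudioDeviceImpl through a local audio track
  // and connect the VoE voice channel to it.
  scoped_refptr<WebRtcLocalAudioTrack> local_audio_track(
      CreateAndStartLocalAudioTrack(capturer, webrtc_audio_device));
  static_cast<webrtc::AudioTrackInterface*>(local_audio_track.get())->
      GetRenderer()->AddChannel(ch);

  // Each test keeps its own reference, so teardown stops the capturer
  // directly instead of looking it up via GetDefaultCapturer().
  capturer->Stop();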