Chromium Code Reviews

Unified Diff: content/renderer/media/webrtc_audio_device_unittest.cc

Issue 8588030: Refactor the Get*Hardware* routines a bit. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src/
Patch Set: '' Created 9 years, 1 month ago
 // Copyright (c) 2011 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #include "base/environment.h"
 #include "base/test/test_timeouts.h"
 #include "content/renderer/media/webrtc_audio_device_impl.h"
 #include "content/test/webrtc_audio_device_test.h"
+#include "media/audio/audio_manager.h"
 #include "media/audio/audio_util.h"
 #include "testing/gmock/include/gmock/gmock.h"
 #include "third_party/webrtc/voice_engine/main/interface/voe_audio_processing.h"
 #include "third_party/webrtc/voice_engine/main/interface/voe_base.h"
 #include "third_party/webrtc/voice_engine/main/interface/voe_external_media.h"
 #include "third_party/webrtc/voice_engine/main/interface/voe_file.h"
 #include "third_party/webrtc/voice_engine/main/interface/voe_network.h"
 
 using testing::_;
+using testing::AnyNumber;
 using testing::InvokeWithoutArgs;
 using testing::Return;
 using testing::StrEq;
 
 namespace {
 
 ACTION_P(QuitMessageLoop, loop_or_proxy) {
   loop_or_proxy->PostTask(FROM_HERE, new MessageLoop::QuitTask());
 }
 
(...skipping 82 matching lines...)
   int channels_;
   DISALLOW_COPY_AND_ASSIGN(WebRTCMediaProcessImpl);
 };
 
 } // end namespace
 
 // Basic test that instantiates and initializes an instance of
 // WebRtcAudioDeviceImpl.
 TEST_F(WebRTCAudioDeviceTest, Construct) {
   AudioUtilNoHardware audio_util(48000.0, 48000.0);
-  set_audio_util_callback(&audio_util);
+  SetAudioUtilCallback(&audio_util);
   scoped_refptr<WebRtcAudioDeviceImpl> audio_device(
       new WebRtcAudioDeviceImpl());
   audio_device->SetSessionId(1);
 
   WebRTCAutoDelete<webrtc::VoiceEngine> engine(webrtc::VoiceEngine::Create());
   ASSERT_TRUE(engine.valid());
 
   ScopedWebRTCPtr<webrtc::VoEBase> base(engine.get());
   int err = base->Init(audio_device);
   EXPECT_EQ(0, err);
   EXPECT_EQ(0, base->Terminate());
 }
 
 // Verify that a call to webrtc::VoEBase::StartPlayout() starts audio output
 // with the correct set of parameters. A WebRtcAudioDeviceImpl instance will
 // be utilized to implement the actual audio path. The test registers a
 // webrtc::VoEExternalMedia implementation to hijack the output audio and
 // verify that streaming starts correctly.
 // Disabled when running headless since the bots don't have the required config.
 TEST_F(WebRTCAudioDeviceTest, StartPlayout) {
   if (IsRunningHeadless())
     return;
 
   AudioUtil audio_util;
-  set_audio_util_callback(&audio_util);
+  SetAudioUtilCallback(&audio_util);
 
   EXPECT_CALL(media_observer(),
       OnSetAudioStreamStatus(_, 1, StrEq("created"))).Times(1);
   EXPECT_CALL(media_observer(),
       OnSetAudioStreamPlaying(_, 1, true)).Times(1);
   EXPECT_CALL(media_observer(),
       OnSetAudioStreamStatus(_, 1, StrEq("closed"))).Times(1);
   EXPECT_CALL(media_observer(),
-      OnDeleteAudioStream(_, 1)).Times(1);
+      OnDeleteAudioStream(_, 1)).Times(AnyNumber());
 
   scoped_refptr<WebRtcAudioDeviceImpl> audio_device(
       new WebRtcAudioDeviceImpl());
   audio_device->SetSessionId(1);
   WebRTCAutoDelete<webrtc::VoiceEngine> engine(webrtc::VoiceEngine::Create());
   ASSERT_TRUE(engine.valid());
 
   ScopedWebRTCPtr<webrtc::VoEBase> base(engine.get());
   ASSERT_TRUE(base.valid());
   int err = base->Init(audio_device);
(...skipping 39 matching lines...)
 // verify that streaming starts correctly. An external transport implementation
 // is also required to ensure that "sending" can start without actually trying
 // to send encoded packets to the network. Our main interest here is to ensure
 // that the audio capturing starts as it should.
 // Disabled when running headless since the bots don't have the required config.
 TEST_F(WebRTCAudioDeviceTest, StartRecording) {
   if (IsRunningHeadless())
     return;
 
   AudioUtil audio_util;
-  set_audio_util_callback(&audio_util);
+  SetAudioUtilCallback(&audio_util);
 
   // TODO(tommi): extend MediaObserver and MockMediaObserver with support
   // for new interfaces, like OnSetAudioStreamRecording(). When done, add
   // EXPECT_CALL() macros here.
 
   scoped_refptr<WebRtcAudioDeviceImpl> audio_device(
       new WebRtcAudioDeviceImpl());
   audio_device->SetSessionId(1);
   WebRTCAutoDelete<webrtc::VoiceEngine> engine(webrtc::VoiceEngine::Create());
   ASSERT_TRUE(engine.valid());
(...skipping 46 matching lines...)
 // Uses WebRtcAudioDeviceImpl to play a local wave file.
 // Disabled when running headless since the bots don't have the required config.
 TEST_F(WebRTCAudioDeviceTest, PlayLocalFile) {
   if (IsRunningHeadless())
     return;
 
   std::string file_path(
       GetTestDataPath(FILE_PATH_LITERAL("speechmusic_mono_16kHz.pcm")));
 
   AudioUtil audio_util;
-  set_audio_util_callback(&audio_util);
+  SetAudioUtilCallback(&audio_util);
 
   EXPECT_CALL(media_observer(),
       OnSetAudioStreamStatus(_, 1, StrEq("created"))).Times(1);
   EXPECT_CALL(media_observer(),
       OnSetAudioStreamPlaying(_, 1, true)).Times(1);
   EXPECT_CALL(media_observer(),
       OnSetAudioStreamStatus(_, 1, StrEq("closed"))).Times(1);
   EXPECT_CALL(media_observer(),
       OnDeleteAudioStream(_, 1)).Times(1);
 
(...skipping 35 matching lines...)
 // which are recorded, encoded, packetized into RTP packets and finally
 // "transmitted". The RTP packets are then fed back into the VoiceEngine
 // where they are decoded and played out on the default audio output device.
 // Disabled when running headless since the bots don't have the required config.
 // TODO(henrika): improve quality by using a wideband codec, enabling noise-
 // suppressions and perhaps also the digital AGC.
 TEST_F(WebRTCAudioDeviceTest, FullDuplexAudio) {
   if (IsRunningHeadless())
     return;
 
+  EXPECT_CALL(media_observer(),
+      OnSetAudioStreamStatus(_, 1, StrEq("created")));
+  EXPECT_CALL(media_observer(),
+      OnSetAudioStreamPlaying(_, 1, true));
+  EXPECT_CALL(media_observer(),
+      OnSetAudioStreamStatus(_, 1, StrEq("closed")));
+
   AudioUtil audio_util;
-  set_audio_util_callback(&audio_util);
+  SetAudioUtilCallback(&audio_util);
 
   scoped_refptr<WebRtcAudioDeviceImpl> audio_device(
       new WebRtcAudioDeviceImpl());
   audio_device->SetSessionId(1);
   WebRTCAutoDelete<webrtc::VoiceEngine> engine(webrtc::VoiceEngine::Create());
   ASSERT_TRUE(engine.valid());
 
   ScopedWebRTCPtr<webrtc::VoEBase> base(engine.get());
   ASSERT_TRUE(base.valid());
   int err = base->Init(audio_device);
(...skipping 14 matching lines...)
       new MessageLoop::QuitTask(),
       TestTimeouts::action_timeout_ms());
   message_loop_.Run();
 
   EXPECT_EQ(0, base->StopSend(ch));
   EXPECT_EQ(0, base->StopPlayout(ch));
 
   EXPECT_EQ(0, base->DeleteChannel(ch));
   EXPECT_EQ(0, base->Terminate());
 }
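
Note on the cardinality change in StartPlayout above: the patch adds `using testing::AnyNumber;` and relaxes the OnDeleteAudioStream expectation from .Times(1) to .Times(AnyNumber()), so the test tolerates zero or several delete notifications instead of requiring exactly one. A minimal standalone sketch of the difference follows; StreamObserver and MockStreamObserver are hypothetical names used only to illustrate the gmock behavior and are not part of this patch.

#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"

using testing::_;
using testing::AnyNumber;

// Hypothetical observer interface, standing in for the media observer
// mocked in the test above.
class StreamObserver {
 public:
  virtual ~StreamObserver() {}
  virtual void OnDeleteAudioStream(void* host, int stream_id) = 0;
};

class MockStreamObserver : public StreamObserver {
 public:
  MOCK_METHOD2(OnDeleteAudioStream, void(void*, int));
};

TEST(CardinalityExample, AnyNumberAllowsZeroOrMoreCalls) {
  MockStreamObserver observer;
  // .Times(1) fails unless the method is called exactly once before the
  // mock is destroyed; .Times(AnyNumber()) passes whether it is called
  // zero, one, or many times.
  EXPECT_CALL(observer, OnDeleteAudioStream(_, 1)).Times(AnyNumber());
  observer.OnDeleteAudioStream(NULL, 1);
  observer.OnDeleteAudioStream(NULL, 1);
}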