OLD | NEW |
| (Empty) |
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | |
2 // Use of this source code is governed by a BSD-style license that can be | |
3 // found in the LICENSE file. | |
4 | |
#include <algorithm>
#include <vector>
6 | |
7 #include "base/environment.h" | |
8 #include "base/file_util.h" | |
9 #include "base/files/file_path.h" | |
10 #include "base/path_service.h" | |
11 #include "base/strings/stringprintf.h" | |
12 #include "base/test/test_timeouts.h" | |
13 #include "content/renderer/media/mock_media_stream_dependency_factory.h" | |
14 #include "content/renderer/media/webrtc/webrtc_local_audio_track_adapter.h" | |
15 #include "content/renderer/media/webrtc_audio_capturer.h" | |
16 #include "content/renderer/media/webrtc_audio_device_impl.h" | |
17 #include "content/renderer/media/webrtc_audio_renderer.h" | |
18 #include "content/renderer/media/webrtc_local_audio_track.h" | |
19 #include "content/renderer/render_thread_impl.h" | |
20 #include "content/test/webrtc_audio_device_test.h" | |
21 #include "media/audio/audio_manager_base.h" | |
22 #include "media/base/audio_hardware_config.h" | |
23 #include "testing/gmock/include/gmock/gmock.h" | |
24 #include "third_party/WebKit/public/platform/WebMediaConstraints.h" | |
25 #include "third_party/webrtc/voice_engine/include/voe_audio_processing.h" | |
26 #include "third_party/webrtc/voice_engine/include/voe_base.h" | |
27 #include "third_party/webrtc/voice_engine/include/voe_codec.h" | |
28 #include "third_party/webrtc/voice_engine/include/voe_external_media.h" | |
29 #include "third_party/webrtc/voice_engine/include/voe_file.h" | |
30 #include "third_party/webrtc/voice_engine/include/voe_network.h" | |
31 | |
32 #if defined(OS_WIN) | |
33 #include "base/win/windows_version.h" | |
34 #endif | |
35 | |
36 using media::AudioParameters; | |
37 using media::CHANNEL_LAYOUT_STEREO; | |
38 using testing::_; | |
39 using testing::AnyNumber; | |
40 using testing::InvokeWithoutArgs; | |
41 using testing::Return; | |
42 using testing::StrEq; | |
43 | |
44 namespace content { | |
45 | |
46 namespace { | |
47 | |
// Render view id shared by all tests in this file when creating capturers
// and renderers.
const int kRenderViewId = 1;

// The number of packets that RunWebRtcLoopbackTimeTest() uses for measurement.
const int kNumberOfPacketsForLoopbackTest = 100;

// The hardware latency we feed to WebRtc.
const int kHardwareLatencyInMs = 50;
55 | |
56 scoped_ptr<media::AudioHardwareConfig> CreateRealHardwareConfig( | |
57 media::AudioManager* manager) { | |
58 const AudioParameters output_parameters = | |
59 manager->GetDefaultOutputStreamParameters(); | |
60 const AudioParameters input_parameters = | |
61 manager->GetInputStreamParameters( | |
62 media::AudioManagerBase::kDefaultDeviceId); | |
63 | |
64 return make_scoped_ptr(new media::AudioHardwareConfig( | |
65 input_parameters, output_parameters)); | |
66 } | |
67 | |
// Returns true if at least one of the first |size| elements of |array|
// equals |value|. |size| may be zero, in which case false is returned.
bool FindElementInArray(const int* array, int size, int value) {
  // Use the pointer arithmetic form directly; the original spelled the end
  // iterator as &array[size], which is equivalent but needlessly indirect.
  return std::find(array, array + size, value) != array + size;
}
72 | |
73 // This method returns false if a non-supported rate is detected on the | |
74 // input or output side. | |
75 // TODO(henrika): add support for automatic fallback to Windows Wave audio | |
76 // if a non-supported rate is detected. It is probably better to detect | |
77 // invalid audio settings by actually trying to open the audio streams instead | |
78 // of relying on hard coded conditions. | |
79 bool HardwareSampleRatesAreValid() { | |
80 // These are the currently supported hardware sample rates in both directions. | |
81 // The actual WebRTC client can limit these ranges further depending on | |
82 // platform but this is the maximum range we support today. | |
83 int valid_input_rates[] = {16000, 32000, 44100, 48000, 96000}; | |
84 int valid_output_rates[] = {16000, 32000, 44100, 48000, 96000}; | |
85 | |
86 media::AudioHardwareConfig* hardware_config = | |
87 RenderThreadImpl::current()->GetAudioHardwareConfig(); | |
88 | |
89 // Verify the input sample rate. | |
90 int input_sample_rate = hardware_config->GetInputSampleRate(); | |
91 | |
92 if (!FindElementInArray(valid_input_rates, arraysize(valid_input_rates), | |
93 input_sample_rate)) { | |
94 LOG(WARNING) << "Non-supported input sample rate detected."; | |
95 return false; | |
96 } | |
97 | |
98 // Given that the input rate was OK, verify the output rate as well. | |
99 int output_sample_rate = hardware_config->GetOutputSampleRate(); | |
100 if (!FindElementInArray(valid_output_rates, arraysize(valid_output_rates), | |
101 output_sample_rate)) { | |
102 LOG(WARNING) << "Non-supported output sample rate detected."; | |
103 return false; | |
104 } | |
105 | |
106 return true; | |
107 } | |
108 | |
109 // Utility method which creates the audio capturer, it returns a scoped | |
110 // reference of the capturer if it is created successfully, otherwise it returns | |
111 // NULL. This method should be used in tests where | |
112 // HardwareSampleRatesAreValid() has been called and returned true. | |
113 scoped_refptr<WebRtcAudioCapturer> CreateAudioCapturer( | |
114 WebRtcAudioDeviceImpl* webrtc_audio_device) { | |
115 media::AudioHardwareConfig* hardware_config = | |
116 RenderThreadImpl::current()->GetAudioHardwareConfig(); | |
117 // Use native capture sample rate and channel configuration to get some | |
118 // action in this test. | |
119 int sample_rate = hardware_config->GetInputSampleRate(); | |
120 media::ChannelLayout channel_layout = | |
121 hardware_config->GetInputChannelLayout(); | |
122 blink::WebMediaConstraints constraints; | |
123 StreamDeviceInfo device(MEDIA_DEVICE_AUDIO_CAPTURE, | |
124 media::AudioManagerBase::kDefaultDeviceName, | |
125 media::AudioManagerBase::kDefaultDeviceId, | |
126 sample_rate, channel_layout, 0); | |
127 device.session_id = 1; | |
128 return WebRtcAudioCapturer::CreateCapturer(kRenderViewId, device, | |
129 constraints, | |
130 webrtc_audio_device); | |
131 } | |
132 | |
133 // Create and start a local audio track. Starting the audio track will connect | |
134 // the audio track to the capturer and also start the source of the capturer. | |
135 // Also, connect the sink to the audio track. | |
136 scoped_ptr<WebRtcLocalAudioTrack> | |
137 CreateAndStartLocalAudioTrack(WebRtcLocalAudioTrackAdapter* adapter, | |
138 WebRtcAudioCapturer* capturer, | |
139 PeerConnectionAudioSink* sink) { | |
140 scoped_ptr<WebRtcLocalAudioTrack> local_audio_track( | |
141 new WebRtcLocalAudioTrack(adapter, capturer, NULL)); | |
142 | |
143 local_audio_track->AddSink(sink); | |
144 local_audio_track->Start(); | |
145 return local_audio_track.Pass(); | |
146 } | |
147 | |
// webrtc::VoEMediaProcess implementation that records the parameters of the
// most recent Process() callback and signals |event| each time a callback
// arrives. State is guarded by |lock_| because Process() is invoked on a
// WebRTC-owned thread while the accessors run on the test thread.
class WebRTCMediaProcessImpl : public webrtc::VoEMediaProcess {
 public:
  // |event| is signaled on every Process() callback; may be NULL, in which
  // case callbacks are recorded but nothing is signaled.
  explicit WebRTCMediaProcessImpl(base::WaitableEvent* event)
      : event_(event),
        channel_id_(-1),
        type_(webrtc::kPlaybackPerChannel),
        packet_size_(0),
        sample_rate_(0),
        channels_(0) {
  }
  virtual ~WebRTCMediaProcessImpl() {}

  // TODO(henrika): Refactor in WebRTC and convert to Chrome coding style.
  virtual void Process(int channel,
                       webrtc::ProcessingTypes type,
                       int16_t audio_10ms[],
                       int length,
                       int sampling_freq,
                       bool is_stereo) OVERRIDE {
    base::AutoLock auto_lock(lock_);
    channel_id_ = channel;
    type_ = type;
    packet_size_ = length;
    sample_rate_ = sampling_freq;
    channels_ = (is_stereo ? 2 : 1);
    if (event_) {
      // Signal that a new callback has been received.
      event_->Signal();
    }
  }

  // Channel id from the last Process() callback, or -1 if none seen yet.
  int channel_id() const {
    base::AutoLock auto_lock(lock_);
    return channel_id_;
  }

  // Processing type from the last callback, widened to int for EXPECT_EQ.
  int type() const {
    base::AutoLock auto_lock(lock_);
    return type_;
  }

  // Number of samples delivered in the last callback.
  int packet_size() const {
    base::AutoLock auto_lock(lock_);
    return packet_size_;
  }

  // Sampling frequency (Hz) of the last callback.
  int sample_rate() const {
    base::AutoLock auto_lock(lock_);
    return sample_rate_;
  }

 private:
  base::WaitableEvent* event_;  // Not owned.
  int channel_id_;
  webrtc::ProcessingTypes type_;
  int packet_size_;
  int sample_rate_;
  int channels_;  // Written by Process() but currently has no accessor.
  mutable base::Lock lock_;
  DISALLOW_COPY_AND_ASSIGN(WebRTCMediaProcessImpl);
};
209 | |
// TODO(xians): Use MediaStreamAudioSink.
// PeerConnectionAudioSink mock that signals |event| whenever OnData() is
// called, letting tests block until the capture data flow has started.
class MockMediaStreamAudioSink : public PeerConnectionAudioSink {
 public:
  // |event| must be non-NULL; signaled on every OnData() callback.
  explicit MockMediaStreamAudioSink(base::WaitableEvent* event)
      : event_(event) {
    DCHECK(event_);
  }
  virtual ~MockMediaStreamAudioSink() {}

  // PeerConnectionAudioSink implementation. The audio payload is ignored;
  // only the fact that a callback happened matters to the tests.
  virtual int OnData(const int16* audio_data,
                     int sample_rate,
                     int number_of_channels,
                     int number_of_frames,
                     const std::vector<int>& channels,
                     int audio_delay_milliseconds,
                     int current_volume,
                     bool need_audio_processing,
                     bool key_pressed) OVERRIDE {
    // Signal that a callback has been received.
    event_->Signal();
    return 0;
  }

  // Set the format for the capture audio parameters. Intentionally a no-op.
  virtual void OnSetFormat(
      const media::AudioParameters& params) OVERRIDE {}

 private:
  base::WaitableEvent* event_;  // Not owned.

  DISALLOW_COPY_AND_ASSIGN(MockMediaStreamAudioSink);
};
243 | |
244 class MockWebRtcAudioRendererSource : public WebRtcAudioRendererSource { | |
245 public: | |
246 explicit MockWebRtcAudioRendererSource(base::WaitableEvent* event) | |
247 : event_(event) { | |
248 DCHECK(event_); | |
249 } | |
250 virtual ~MockWebRtcAudioRendererSource() {} | |
251 | |
252 // WebRtcAudioRendererSource implementation. | |
253 virtual void RenderData(media::AudioBus* audio_bus, | |
254 int sample_rate, | |
255 int audio_delay_milliseconds) OVERRIDE { | |
256 // Signal that a callback has been received. | |
257 // Initialize the memory to zero to avoid uninitialized warning from | |
258 // Valgrind. | |
259 audio_bus->Zero(); | |
260 event_->Signal(); | |
261 } | |
262 | |
263 virtual void RemoveAudioRenderer(WebRtcAudioRenderer* renderer) OVERRIDE {}; | |
264 | |
265 private: | |
266 base::WaitableEvent* event_; | |
267 | |
268 DISALLOW_COPY_AND_ASSIGN(MockWebRtcAudioRendererSource); | |
269 }; | |
270 | |
// Prints numerical information to stdout in a controlled format so we can plot
// the result. Emits a line of the form "*RESULT <graph>: <trace>= [<ms>,] ms".
void PrintPerfResultMs(const char* graph, const char* trace, float time_ms) {
  std::string times;
  base::StringAppendF(&times, "%.2f,", time_ms);
  // The format string is assembled from fragments, presumably so that tools
  // grepping the sources for the literal "RESULT" perf marker do not match
  // this file -- TODO confirm against the perf-test log scraper.
  std::string result = base::StringPrintf(
      "%sRESULT %s%s: %s= %s%s%s %s\n", "*", graph, "",
      trace, "[", times.c_str(), "]", "ms");

  // Flush around the printf so the RESULT line is not interleaved with other
  // buffered output.
  fflush(stdout);
  printf("%s", result.c_str());
  fflush(stdout);
}
284 | |
285 void ReadDataFromSpeechFile(char* data, int length) { | |
286 base::FilePath data_file; | |
287 CHECK(PathService::Get(base::DIR_SOURCE_ROOT, &data_file)); | |
288 data_file = | |
289 data_file.Append(FILE_PATH_LITERAL("media")) | |
290 .Append(FILE_PATH_LITERAL("test")) | |
291 .Append(FILE_PATH_LITERAL("data")) | |
292 .Append(FILE_PATH_LITERAL("speech_16b_stereo_48kHz.raw")); | |
293 DCHECK(base::PathExists(data_file)); | |
294 int64 data_file_size64 = 0; | |
295 DCHECK(base::GetFileSize(data_file, &data_file_size64)); | |
296 EXPECT_EQ(length, base::ReadFile(data_file, data, length)); | |
297 DCHECK(data_file_size64 > length); | |
298 } | |
299 | |
// Configures |channel| on |engine| to send and receive using the ISAC codec.
// Compiled to a no-op on Android and iOS.
void SetChannelCodec(webrtc::VoiceEngine* engine, int channel) {
  // TODO(xians): move the codec as an input param to this function, and add
  // tests for different codecs, also add support to Android and IOS.
#if !defined(OS_ANDROID) && !defined(OS_IOS)
  webrtc::CodecInst isac;
  // NOTE(review): assumes "ISAC" fits in CodecInst::plname (a fixed-size
  // char array in the webrtc headers) -- confirm.
  strcpy(isac.plname, "ISAC");
  isac.pltype = 104;
  isac.pacsize = 960;
  isac.plfreq = 32000;
  isac.channels = 1;
  isac.rate = -1;  // Presumably "codec default" -- verify in VoECodec docs.
  ScopedWebRTCPtr<webrtc::VoECodec> codec(engine);
  EXPECT_EQ(0, codec->SetRecPayloadType(channel, isac));
  EXPECT_EQ(0, codec->SetSendCodec(channel, isac));
#endif
}
316 | |
// Returns the time in milliseconds for sending kNumberOfPacketsForLoopbackTest
// packets to WebRtc for encoding, signal processing, decoding and receiving
// them back. |enable_apm| toggles the AGC, NS and AEC processing components.
int RunWebRtcLoopbackTimeTest(media::AudioManager* manager,
                              bool enable_apm) {
  scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
      new WebRtcAudioDeviceImpl());
  WebRTCAutoDelete<webrtc::VoiceEngine> engine(webrtc::VoiceEngine::Create());
  EXPECT_TRUE(engine.valid());
  ScopedWebRTCPtr<webrtc::VoEBase> base(engine.get());
  EXPECT_TRUE(base.valid());
  int err = base->Init(webrtc_audio_device.get());
  EXPECT_EQ(0, err);

  // We use OnSetFormat() to configure the audio parameters so that this
  // test can run on machine without hardware device.
  const media::AudioParameters params = media::AudioParameters(
      media::AudioParameters::AUDIO_PCM_LOW_LATENCY, CHANNEL_LAYOUT_STEREO,
      48000, 2, 480);
  // Both interfaces are implemented by the same device object; the casts
  // select the capture-side and render-side views of it.
  PeerConnectionAudioSink* capturer_sink =
      static_cast<PeerConnectionAudioSink*>(webrtc_audio_device.get());
  WebRtcAudioRendererSource* renderer_source =
      static_cast<WebRtcAudioRendererSource*>(webrtc_audio_device.get());

  // Turn on/off all the signal processing components like AGC, AEC and NS.
  ScopedWebRTCPtr<webrtc::VoEAudioProcessing> audio_processing(engine.get());
  EXPECT_TRUE(audio_processing.valid());
  audio_processing->SetAgcStatus(enable_apm);
  audio_processing->SetNsStatus(enable_apm);
  audio_processing->SetEcStatus(enable_apm);

  // Create a voice channel for the WebRtc.
  int channel = base->CreateChannel();
  EXPECT_NE(-1, channel);
  SetChannelCodec(engine.get(), channel);

  // Use our fake network transmission and start playout and recording.
  ScopedWebRTCPtr<webrtc::VoENetwork> network(engine.get());
  EXPECT_TRUE(network.valid());
  scoped_ptr<WebRTCTransportImpl> transport(
      new WebRTCTransportImpl(network.get()));
  EXPECT_EQ(0, network->RegisterExternalTransport(channel, *transport.get()));
  EXPECT_EQ(0, base->StartPlayout(channel));
  EXPECT_EQ(0, base->StartSend(channel));

  // Read speech data from a speech test file.
  // 2 bytes per sample (16-bit PCM) times the channel count per frame.
  const int input_packet_size =
      params.frames_per_buffer() * 2 * params.channels();
  const size_t length = input_packet_size * kNumberOfPacketsForLoopbackTest;
  scoped_ptr<char[]> capture_data(new char[length]);
  ReadDataFromSpeechFile(capture_data.get(), length);

  // Start the timer.
  scoped_ptr<media::AudioBus> render_audio_bus(media::AudioBus::Create(params));
  base::Time start_time = base::Time::Now();
  int delay = 0;
  std::vector<int> voe_channels;
  voe_channels.push_back(channel);
  for (int j = 0; j < kNumberOfPacketsForLoopbackTest; ++j) {
    // Sending fake capture data to WebRtc.
    // Note: the 1.0 literal is implicitly converted to int 1 for the
    // |current_volume| parameter of OnData().
    capturer_sink->OnData(
        reinterpret_cast<int16*>(capture_data.get() + input_packet_size * j),
        params.sample_rate(),
        params.channels(),
        params.frames_per_buffer(),
        voe_channels,
        kHardwareLatencyInMs,
        1.0,
        enable_apm,
        false);

    // Receiving data from WebRtc.
    renderer_source->RenderData(
        render_audio_bus.get(), params.sample_rate(),
        kHardwareLatencyInMs + delay);
    // Accumulated wall-clock time so far, fed back as extra render delay.
    delay = (base::Time::Now() - start_time).InMilliseconds();
  }

  // Total wall-clock time for the whole loopback run.
  int latency = (base::Time::Now() - start_time).InMilliseconds();

  EXPECT_EQ(0, base->StopSend(channel));
  EXPECT_EQ(0, base->StopPlayout(channel));
  EXPECT_EQ(0, base->DeleteChannel(channel));
  EXPECT_EQ(0, base->Terminate());

  return latency;
}
403 | |
404 } // namespace | |
405 | |
406 // Trivial test which verifies that one part of the test harness | |
407 // (HardwareSampleRatesAreValid()) works as intended for all supported | |
408 // hardware input sample rates. | |
409 TEST_F(MAYBE_WebRTCAudioDeviceTest, TestValidInputRates) { | |
410 int valid_rates[] = {16000, 32000, 44100, 48000, 96000}; | |
411 | |
412 // Verify that we will approve all rates listed in |valid_rates|. | |
413 for (size_t i = 0; i < arraysize(valid_rates); ++i) { | |
414 EXPECT_TRUE(FindElementInArray(valid_rates, arraysize(valid_rates), | |
415 valid_rates[i])); | |
416 } | |
417 | |
418 // Verify that any value outside the valid range results in negative | |
419 // find results. | |
420 int invalid_rates[] = {-1, 0, 8000, 11025, 22050, 192000}; | |
421 for (size_t i = 0; i < arraysize(invalid_rates); ++i) { | |
422 EXPECT_FALSE(FindElementInArray(valid_rates, arraysize(valid_rates), | |
423 invalid_rates[i])); | |
424 } | |
425 } | |
426 | |
427 // Trivial test which verifies that one part of the test harness | |
428 // (HardwareSampleRatesAreValid()) works as intended for all supported | |
429 // hardware output sample rates. | |
430 TEST_F(MAYBE_WebRTCAudioDeviceTest, TestValidOutputRates) { | |
431 int valid_rates[] = {44100, 48000, 96000}; | |
432 | |
433 // Verify that we will approve all rates listed in |valid_rates|. | |
434 for (size_t i = 0; i < arraysize(valid_rates); ++i) { | |
435 EXPECT_TRUE(FindElementInArray(valid_rates, arraysize(valid_rates), | |
436 valid_rates[i])); | |
437 } | |
438 | |
439 // Verify that any value outside the valid range results in negative | |
440 // find results. | |
441 int invalid_rates[] = {-1, 0, 8000, 11025, 22050, 32000, 192000}; | |
442 for (size_t i = 0; i < arraysize(invalid_rates); ++i) { | |
443 EXPECT_FALSE(FindElementInArray(valid_rates, arraysize(valid_rates), | |
444 invalid_rates[i])); | |
445 } | |
446 } | |
447 | |
// Basic test that instantiates and initializes an instance of
// WebRtcAudioDeviceImpl.
TEST_F(MAYBE_WebRTCAudioDeviceTest, Construct) {
#if defined(OS_WIN)
  // This test crashes on Win XP bots.
  if (base::win::GetVersion() <= base::win::VERSION_XP)
    return;
#endif

  // Fixed mono-in / stereo-out 48 kHz configuration so the test does not
  // depend on real hardware parameters.
  AudioParameters input_params(
      AudioParameters::AUDIO_PCM_LOW_LATENCY,
      media::CHANNEL_LAYOUT_MONO,
      48000,
      16,
      480);

  AudioParameters output_params(
      AudioParameters::AUDIO_PCM_LOW_LATENCY,
      media::CHANNEL_LAYOUT_STEREO,
      48000,
      16,
      480);

  media::AudioHardwareConfig audio_config(input_params, output_params);
  SetAudioHardwareConfig(&audio_config);

  scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
      new WebRtcAudioDeviceImpl());

  WebRTCAutoDelete<webrtc::VoiceEngine> engine(webrtc::VoiceEngine::Create());
  ASSERT_TRUE(engine.valid());

  // Init() is expected to succeed and a capturer should be creatable against
  // the initialized device.
  ScopedWebRTCPtr<webrtc::VoEBase> base(engine.get());
  int err = base->Init(webrtc_audio_device.get());
  EXPECT_TRUE(CreateAudioCapturer(webrtc_audio_device) != NULL);
  EXPECT_EQ(0, err);
  EXPECT_EQ(0, base->Terminate());
}
486 | |
487 // Verify that a call to webrtc::VoEBase::StartPlayout() starts audio output | |
488 // with the correct set of parameters. A WebRtcAudioDeviceImpl instance will | |
489 // be utilized to implement the actual audio path. The test registers a | |
490 // webrtc::VoEExternalMedia implementation to hijack the output audio and | |
491 // verify that streaming starts correctly. | |
// TODO(henrika): include on Android as well as soon as all race conditions
493 // in OpenSLES are resolved. | |
494 #if defined(OS_ANDROID) | |
495 #define MAYBE_StartPlayout DISABLED_StartPlayout | |
496 #else | |
497 #define MAYBE_StartPlayout StartPlayout | |
498 #endif | |
TEST_F(MAYBE_WebRTCAudioDeviceTest, MAYBE_StartPlayout) {
  // Requires a real output device; skip quietly on headless bots.
  if (!has_output_devices_) {
    LOG(WARNING) << "No output device detected.";
    return;
  }

  scoped_ptr<media::AudioHardwareConfig> config =
      CreateRealHardwareConfig(audio_manager_.get());
  SetAudioHardwareConfig(config.get());

  if (!HardwareSampleRatesAreValid())
    return;

  WebRTCAutoDelete<webrtc::VoiceEngine> engine(webrtc::VoiceEngine::Create());
  ASSERT_TRUE(engine.valid());
  ScopedWebRTCPtr<webrtc::VoEBase> base(engine.get());
  ASSERT_TRUE(base.valid());

  scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
      new WebRtcAudioDeviceImpl());
  int err = base->Init(webrtc_audio_device.get());
  ASSERT_EQ(0, err);

  // Hijack the playback path: |media_process| records the parameters of the
  // playout callbacks and signals |event| when data starts flowing.
  ScopedWebRTCPtr<webrtc::VoEExternalMedia> external_media(engine.get());
  ASSERT_TRUE(external_media.valid());
  base::WaitableEvent event(false, false);
  scoped_ptr<WebRTCMediaProcessImpl> media_process(
      new WebRTCMediaProcessImpl(&event));
  int ch = base->CreateChannel();
  EXPECT_NE(-1, ch);
  EXPECT_EQ(0, external_media->RegisterExternalMediaProcessing(
      ch, webrtc::kPlaybackPerChannel, *media_process.get()));

  scoped_refptr<webrtc::MediaStreamInterface> media_stream(
      new talk_base::RefCountedObject<MockMediaStream>("label"));

  // Start playout and wire up a renderer proxy to the audio device.
  EXPECT_EQ(0, base->StartPlayout(ch));
  scoped_refptr<WebRtcAudioRenderer> renderer(
      CreateDefaultWebRtcAudioRenderer(kRenderViewId, media_stream));
  scoped_refptr<MediaStreamAudioRenderer> proxy(
      renderer->CreateSharedAudioRendererProxy(media_stream));
  EXPECT_TRUE(webrtc_audio_device->SetAudioRenderer(renderer.get()));
  proxy->Start();
  proxy->Play();

  // Block until the first hijacked playout callback arrives.
  EXPECT_TRUE(event.TimedWait(TestTimeouts::action_timeout()));
  WaitForIOThreadCompletion();

  EXPECT_TRUE(webrtc_audio_device->Playing());
  EXPECT_FALSE(webrtc_audio_device->Recording());
  EXPECT_EQ(ch, media_process->channel_id());
  EXPECT_EQ(webrtc::kPlaybackPerChannel, media_process->type());
  // 80 samples at 8000 Hz is a 10 ms frame -- presumably the per-channel
  // processing chunk used by VoE; confirm against the VoE implementation.
  EXPECT_EQ(80, media_process->packet_size());
  EXPECT_EQ(8000, media_process->sample_rate());

  EXPECT_EQ(0, external_media->DeRegisterExternalMediaProcessing(
      ch, webrtc::kPlaybackPerChannel));
  EXPECT_EQ(0, base->StopPlayout(ch));
  proxy->Stop();
  EXPECT_EQ(0, base->DeleteChannel(ch));
  EXPECT_EQ(0, base->Terminate());
}
561 | |
562 // Verify that a call to webrtc::VoEBase::StartRecording() starts audio input | |
563 // with the correct set of parameters. A WebRtcAudioDeviceImpl instance will | |
564 // be utilized to implement the actual audio path. The test registers a | |
565 // webrtc::VoEExternalMedia implementation to hijack the input audio and | |
566 // verify that streaming starts correctly. An external transport implementation | |
567 // is also required to ensure that "sending" can start without actually trying | |
568 // to send encoded packets to the network. Our main interest here is to ensure | |
569 // that the audio capturing starts as it should. | |
570 // Disabled when running headless since the bots don't have the required config. | |
571 | |
572 // TODO(leozwang): Because ExternalMediaProcessing is disabled in webrtc, | |
573 // disable this unit test on Android for now. | |
574 #if defined(OS_ANDROID) | |
575 #define MAYBE_StartRecording DISABLED_StartRecording | |
576 #elif defined(OS_LINUX) && !defined(OS_CHROMEOS) && defined(ARCH_CPU_ARM_FAMILY) | |
577 // This test is failing on ARM linux: http://crbug.com/238490 | |
578 #define MAYBE_StartRecording DISABLED_StartRecording | |
579 #else | |
580 // Flakily hangs on all other platforms as well: crbug.com/268376. | |
581 // When the flakiness has been fixed, you probably want to leave it disabled | |
582 // on the above platforms. | |
583 #define MAYBE_StartRecording DISABLED_StartRecording | |
584 #endif | |
585 | |
TEST_F(MAYBE_WebRTCAudioDeviceTest, MAYBE_StartRecording) {
  // Requires both real input and output devices; skip quietly otherwise.
  if (!has_input_devices_ || !has_output_devices_) {
    LOG(WARNING) << "Missing audio devices.";
    return;
  }

  scoped_ptr<media::AudioHardwareConfig> config =
      CreateRealHardwareConfig(audio_manager_.get());
  SetAudioHardwareConfig(config.get());

  if (!HardwareSampleRatesAreValid())
    return;

  // TODO(tommi): extend MediaObserver and MockMediaObserver with support
  // for new interfaces, like OnSetAudioStreamRecording(). When done, add
  // EXPECT_CALL() macros here.
  scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
      new WebRtcAudioDeviceImpl());

  WebRTCAutoDelete<webrtc::VoiceEngine> engine(webrtc::VoiceEngine::Create());
  ASSERT_TRUE(engine.valid());

  ScopedWebRTCPtr<webrtc::VoEBase> base(engine.get());
  ASSERT_TRUE(base.valid());
  int err = base->Init(webrtc_audio_device.get());
  ASSERT_EQ(0, err);

  int ch = base->CreateChannel();
  EXPECT_NE(-1, ch);

  // Hijack the recording path: |media_process| records the parameters of the
  // recording callbacks and signals |event| when data starts flowing.
  ScopedWebRTCPtr<webrtc::VoEExternalMedia> external_media(engine.get());
  ASSERT_TRUE(external_media.valid());

  base::WaitableEvent event(false, false);
  scoped_ptr<WebRTCMediaProcessImpl> media_process(
      new WebRTCMediaProcessImpl(&event));
  EXPECT_EQ(0, external_media->RegisterExternalMediaProcessing(
      ch, webrtc::kRecordingPerChannel, *media_process.get()));

  // We must add an external transport implementation to be able to start
  // recording without actually sending encoded packets to the network. All
  // we want to do here is to verify that audio capturing starts as it should.
  ScopedWebRTCPtr<webrtc::VoENetwork> network(engine.get());
  scoped_ptr<WebRTCTransportImpl> transport(
      new WebRTCTransportImpl(network.get()));
  EXPECT_EQ(0, network->RegisterExternalTransport(ch, *transport.get()));
  EXPECT_EQ(0, base->StartSend(ch));

  // Create the capturer which starts the source of the data flow.
  scoped_refptr<WebRtcAudioCapturer> capturer(
      CreateAudioCapturer(webrtc_audio_device));
  EXPECT_TRUE(capturer);

  // Create and start a local audio track which is bridging the data flow
  // between the capturer and WebRtcAudioDeviceImpl.
  scoped_refptr<WebRtcLocalAudioTrackAdapter> adapter(
      WebRtcLocalAudioTrackAdapter::Create(std::string(), NULL));
  scoped_ptr<WebRtcLocalAudioTrack> local_audio_track(
      CreateAndStartLocalAudioTrack(adapter, capturer, webrtc_audio_device));
  // Connect the VoE voice channel to the audio track.
  static_cast<webrtc::AudioTrackInterface*>(
      adapter.get())->GetRenderer()->AddChannel(ch);

  // Verify we get the data flow.
  EXPECT_TRUE(event.TimedWait(TestTimeouts::action_timeout()));
  WaitForIOThreadCompletion();

  EXPECT_FALSE(webrtc_audio_device->Playing());
  EXPECT_TRUE(webrtc_audio_device->Recording());
  EXPECT_EQ(ch, media_process->channel_id());
  EXPECT_EQ(webrtc::kRecordingPerChannel, media_process->type());
  // 80 samples at 8000 Hz is a 10 ms frame -- presumably the per-channel
  // processing chunk used by VoE; confirm against the VoE implementation.
  EXPECT_EQ(80, media_process->packet_size());
  EXPECT_EQ(8000, media_process->sample_rate());

  EXPECT_EQ(0, external_media->DeRegisterExternalMediaProcessing(
      ch, webrtc::kRecordingPerChannel));
  EXPECT_EQ(0, base->StopSend(ch));

  capturer->Stop();
  EXPECT_EQ(0, base->DeleteChannel(ch));
  EXPECT_EQ(0, base->Terminate());
}
668 | |
669 // Uses WebRtcAudioDeviceImpl to play a local wave file. | |
// TODO(henrika): include on Android as well as soon as all race conditions
671 // in OpenSLES are resolved. | |
672 #if defined(OS_ANDROID) | |
673 #define MAYBE_PlayLocalFile DISABLED_PlayLocalFile | |
674 #else | |
675 #define MAYBE_PlayLocalFile PlayLocalFile | |
676 #endif | |
TEST_F(MAYBE_WebRTCAudioDeviceTest, MAYBE_PlayLocalFile) {
  // Requires a real output device; skip quietly on headless bots.
  if (!has_output_devices_) {
    LOG(WARNING) << "No output device detected.";
    return;
  }

  std::string file_path(
      GetTestDataPath(FILE_PATH_LITERAL("speechmusic_mono_16kHz.pcm")));

  scoped_ptr<media::AudioHardwareConfig> config =
      CreateRealHardwareConfig(audio_manager_.get());
  SetAudioHardwareConfig(config.get());

  if (!HardwareSampleRatesAreValid())
    return;

  WebRTCAutoDelete<webrtc::VoiceEngine> engine(webrtc::VoiceEngine::Create());
  ASSERT_TRUE(engine.valid());
  ScopedWebRTCPtr<webrtc::VoEBase> base(engine.get());
  ASSERT_TRUE(base.valid());

  scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
      new WebRtcAudioDeviceImpl());
  int err = base->Init(webrtc_audio_device.get());
  ASSERT_EQ(0, err);
  int ch = base->CreateChannel();
  EXPECT_NE(-1, ch);
  EXPECT_EQ(0, base->StartPlayout(ch));
  scoped_refptr<webrtc::MediaStreamInterface> media_stream(
      new talk_base::RefCountedObject<MockMediaStream>("label"));
  scoped_refptr<WebRtcAudioRenderer> renderer(
      CreateDefaultWebRtcAudioRenderer(kRenderViewId, media_stream));
  scoped_refptr<MediaStreamAudioRenderer> proxy(
      renderer->CreateSharedAudioRendererProxy(media_stream));
  EXPECT_TRUE(webrtc_audio_device->SetAudioRenderer(renderer.get()));
  proxy->Start();
  proxy->Play();

  // Sanity-check that the file can be opened and has a non-zero duration
  // before starting local playback of it.
  ScopedWebRTCPtr<webrtc::VoEFile> file(engine.get());
  ASSERT_TRUE(file.valid());
  int duration = 0;
  EXPECT_EQ(0, file->GetFileDuration(file_path.c_str(), duration,
                                     webrtc::kFileFormatPcm16kHzFile));
  EXPECT_NE(0, duration);

  EXPECT_EQ(0, file->StartPlayingFileLocally(ch, file_path.c_str(), false,
                                             webrtc::kFileFormatPcm16kHzFile));

  // Play 2 seconds worth of audio and then quit.
  message_loop_.PostDelayedTask(FROM_HERE,
                                base::MessageLoop::QuitClosure(),
                                base::TimeDelta::FromSeconds(2));
  message_loop_.Run();

  proxy->Stop();
  // NOTE(review): StartSend() is never called in this test, so expecting
  // StopSend() to succeed here looks suspicious -- verify the intent.
  EXPECT_EQ(0, base->StopSend(ch));
  EXPECT_EQ(0, base->StopPlayout(ch));
  EXPECT_EQ(0, base->DeleteChannel(ch));
  EXPECT_EQ(0, base->Terminate());
}
737 | |
738 // Uses WebRtcAudioDeviceImpl to play out recorded audio in loopback. | |
739 // An external transport implementation is utilized to feed back RTP packets | |
740 // which are recorded, encoded, packetized into RTP packets and finally | |
741 // "transmitted". The RTP packets are then fed back into the VoiceEngine | |
742 // where they are decoded and played out on the default audio output device. | |
743 // Disabled when running headless since the bots don't have the required config. | |
744 // TODO(henrika): improve quality by using a wideband codec, enabling noise- | |
745 // suppressions etc. | |
746 // FullDuplexAudioWithAGC is flaky on Android, disable it for now. | |
747 // Also flakily hangs on Windows: crbug.com/269348. | |
748 #if defined(OS_ANDROID) || defined(OS_WIN) | |
749 #define MAYBE_FullDuplexAudioWithAGC DISABLED_FullDuplexAudioWithAGC | |
750 #else | |
751 #define MAYBE_FullDuplexAudioWithAGC FullDuplexAudioWithAGC | |
752 #endif | |
TEST_F(MAYBE_WebRTCAudioDeviceTest, MAYBE_FullDuplexAudioWithAGC) {
  // Full-duplex loopback needs both a capture and a render device.
  if (!has_output_devices_ || !has_input_devices_) {
    LOG(WARNING) << "Missing audio devices.";
    return;
  }

  // Use the machine's real hardware parameters so the test runs at native
  // sample rates.
  scoped_ptr<media::AudioHardwareConfig> config =
      CreateRealHardwareConfig(audio_manager_.get());
  SetAudioHardwareConfig(config.get());

  if (!HardwareSampleRatesAreValid())
    return;

  // Bring up a VoiceEngine and initialize it against Chrome's audio device
  // module (WebRtcAudioDeviceImpl).
  WebRTCAutoDelete<webrtc::VoiceEngine> engine(webrtc::VoiceEngine::Create());
  ASSERT_TRUE(engine.valid());
  ScopedWebRTCPtr<webrtc::VoEBase> base(engine.get());
  ASSERT_TRUE(base.valid());

  scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
      new WebRtcAudioDeviceImpl());
  int err = base->Init(webrtc_audio_device.get());
  ASSERT_EQ(0, err);

  // Verify the default AGC status reported by the voice engine's audio
  // processing interface.
  ScopedWebRTCPtr<webrtc::VoEAudioProcessing> audio_processing(engine.get());
  ASSERT_TRUE(audio_processing.valid());
#if defined(OS_ANDROID)
  // On Android, by default AGC is off.
  bool enabled = true;
  webrtc::AgcModes agc_mode = webrtc::kAgcDefault;
  EXPECT_EQ(0, audio_processing->GetAgcStatus(enabled, agc_mode));
  EXPECT_FALSE(enabled);
#else
  // On other platforms AGC is expected to be on by default, in adaptive
  // analog mode.
  bool enabled = false;
  webrtc::AgcModes agc_mode = webrtc::kAgcDefault;
  EXPECT_EQ(0, audio_processing->GetAgcStatus(enabled, agc_mode));
  EXPECT_TRUE(enabled);
  EXPECT_EQ(agc_mode, webrtc::kAgcAdaptiveAnalog);
#endif

  int ch = base->CreateChannel();
  EXPECT_NE(-1, ch);

  // Create a capturer plus a local audio track feeding it; starting the track
  // also starts the capturer's source.
  scoped_refptr<WebRtcLocalAudioTrackAdapter> adapter(
      WebRtcLocalAudioTrackAdapter::Create(std::string(), NULL));
  scoped_refptr<WebRtcAudioCapturer> capturer(
      CreateAudioCapturer(webrtc_audio_device));
  EXPECT_TRUE(capturer);
  scoped_ptr<WebRtcLocalAudioTrack> local_audio_track(
      CreateAndStartLocalAudioTrack(adapter, capturer, webrtc_audio_device));
  // Connect the VoE voice channel to the audio track adapter.
  static_cast<webrtc::AudioTrackInterface*>(
      adapter.get())->GetRenderer()->AddChannel(ch);

  // Register an external transport that loops encoded RTP packets straight
  // back into the engine, then start both directions.
  ScopedWebRTCPtr<webrtc::VoENetwork> network(engine.get());
  ASSERT_TRUE(network.valid());
  scoped_ptr<WebRTCTransportImpl> transport(
      new WebRTCTransportImpl(network.get()));
  EXPECT_EQ(0, network->RegisterExternalTransport(ch, *transport.get()));
  EXPECT_EQ(0, base->StartPlayout(ch));
  EXPECT_EQ(0, base->StartSend(ch));
  // Hook up a renderer (via its shared proxy) so decoded audio reaches the
  // default output device.
  scoped_refptr<webrtc::MediaStreamInterface> media_stream(
      new talk_base::RefCountedObject<MockMediaStream>("label"));
  scoped_refptr<WebRtcAudioRenderer> renderer(
      CreateDefaultWebRtcAudioRenderer(kRenderViewId, media_stream));
  scoped_refptr<MediaStreamAudioRenderer> proxy(
      renderer->CreateSharedAudioRendererProxy(media_stream));
  EXPECT_TRUE(webrtc_audio_device->SetAudioRenderer(renderer.get()));
  proxy->Start();
  proxy->Play();

  // Let the loopback run for two seconds before shutting down.
  VLOG(0) << ">> You should now be able to hear yourself in loopback...";
  message_loop_.PostDelayedTask(FROM_HERE,
                                base::MessageLoop::QuitClosure(),
                                base::TimeDelta::FromSeconds(2));
  message_loop_.Run();

  // Tear down in reverse order: capture, render proxy, channel, engine.
  capturer->Stop();
  proxy->Stop();
  EXPECT_EQ(0, base->StopSend(ch));
  EXPECT_EQ(0, base->StopPlayout(ch));

  EXPECT_EQ(0, base->DeleteChannel(ch));
  EXPECT_EQ(0, base->Terminate());
}
837 | |
838 // Test times out on bots, see http://crbug.com/247447 | |
TEST_F(MAYBE_WebRTCAudioDeviceTest, DISABLED_WebRtcRecordingSetupTime) {
  // Perf test: measures the time from StartSend() until the first captured
  // audio reaches the sink. Requires a real capture device.
  if (!has_input_devices_) {
    LOG(WARNING) << "Missing audio capture devices.";
    return;
  }

  // Run against the machine's real hardware parameters.
  scoped_ptr<media::AudioHardwareConfig> config =
      CreateRealHardwareConfig(audio_manager_.get());
  SetAudioHardwareConfig(config.get());

  if (!HardwareSampleRatesAreValid())
    return;

  scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
      new WebRtcAudioDeviceImpl());

  WebRTCAutoDelete<webrtc::VoiceEngine> engine(webrtc::VoiceEngine::Create());
  ASSERT_TRUE(engine.valid());

  ScopedWebRTCPtr<webrtc::VoEBase> base(engine.get());
  ASSERT_TRUE(base.valid());
  int err = base->Init(webrtc_audio_device.get());
  ASSERT_EQ(0, err);

  int ch = base->CreateChannel();
  EXPECT_NE(-1, ch);

  // |event| is signaled by the mock sink; see the TimedWait() below.
  scoped_refptr<WebRtcAudioCapturer> capturer(
      CreateAudioCapturer(webrtc_audio_device));
  EXPECT_TRUE(capturer);
  base::WaitableEvent event(false, false);
  scoped_ptr<MockMediaStreamAudioSink> sink(
      new MockMediaStreamAudioSink(&event));

  // Create and start a local audio track. Starting the audio track will
  // connect the audio track to the capturer and also start the source of the
  // capturer.
  scoped_refptr<WebRtcLocalAudioTrackAdapter> adapter(
      WebRtcLocalAudioTrackAdapter::Create(std::string(), NULL));
  scoped_ptr<WebRtcLocalAudioTrack> local_audio_track(
      CreateAndStartLocalAudioTrack(adapter, capturer, sink.get()));

  // Connect the VoE voice channel to the audio track adapter.
  static_cast<webrtc::AudioTrackInterface*>(
      adapter.get())->GetRenderer()->AddChannel(ch);

  // Start the clock just before sending begins; stop it when the sink's
  // event fires.
  base::Time start_time = base::Time::Now();
  EXPECT_EQ(0, base->StartSend(ch));

  EXPECT_TRUE(event.TimedWait(TestTimeouts::action_timeout()));
  int delay = (base::Time::Now() - start_time).InMilliseconds();
  PrintPerfResultMs("webrtc_recording_setup_c", "t", delay);

  capturer->Stop();
  EXPECT_EQ(0, base->StopSend(ch));
  EXPECT_EQ(0, base->DeleteChannel(ch));
  EXPECT_EQ(0, base->Terminate());
}
896 | |
897 | |
// TODO(henrika): include on Android as well, as soon as all race conditions
// in OpenSLES are resolved.
900 #if defined(OS_ANDROID) | |
901 #define MAYBE_WebRtcPlayoutSetupTime DISABLED_WebRtcPlayoutSetupTime | |
902 #else | |
903 #define MAYBE_WebRtcPlayoutSetupTime WebRtcPlayoutSetupTime | |
904 #endif | |
905 TEST_F(MAYBE_WebRTCAudioDeviceTest, MAYBE_WebRtcPlayoutSetupTime) { | |
906 if (!has_output_devices_) { | |
907 LOG(WARNING) << "No output device detected."; | |
908 return; | |
909 } | |
910 | |
911 scoped_ptr<media::AudioHardwareConfig> config = | |
912 CreateRealHardwareConfig(audio_manager_.get()); | |
913 SetAudioHardwareConfig(config.get()); | |
914 | |
915 if (!HardwareSampleRatesAreValid()) | |
916 return; | |
917 | |
918 base::WaitableEvent event(false, false); | |
919 scoped_ptr<MockWebRtcAudioRendererSource> renderer_source( | |
920 new MockWebRtcAudioRendererSource(&event)); | |
921 | |
922 scoped_refptr<webrtc::MediaStreamInterface> media_stream( | |
923 new talk_base::RefCountedObject<MockMediaStream>("label")); | |
924 scoped_refptr<WebRtcAudioRenderer> renderer( | |
925 CreateDefaultWebRtcAudioRenderer(kRenderViewId, media_stream)); | |
926 renderer->Initialize(renderer_source.get()); | |
927 scoped_refptr<MediaStreamAudioRenderer> proxy( | |
928 renderer->CreateSharedAudioRendererProxy(media_stream)); | |
929 proxy->Start(); | |
930 | |
931 // Start the timer and playout. | |
932 base::Time start_time = base::Time::Now(); | |
933 proxy->Play(); | |
934 EXPECT_TRUE(event.TimedWait(TestTimeouts::action_timeout())); | |
935 int delay = (base::Time::Now() - start_time).InMilliseconds(); | |
936 PrintPerfResultMs("webrtc_playout_setup_c", "t", delay); | |
937 | |
938 proxy->Stop(); | |
939 } | |
940 | |
941 #if defined(OS_LINUX) && !defined(OS_CHROMEOS) && defined(ARCH_CPU_ARM_FAMILY) | |
942 // Timing out on ARM linux bot: http://crbug.com/238490 | |
943 #define MAYBE_WebRtcLoopbackTimeWithoutSignalProcessing \ | |
944 DISABLED_WebRtcLoopbackTimeWithoutSignalProcessing | |
945 #else | |
946 #define MAYBE_WebRtcLoopbackTimeWithoutSignalProcessing \ | |
947 WebRtcLoopbackTimeWithoutSignalProcessing | |
948 #endif | |
949 | |
950 TEST_F(MAYBE_WebRTCAudioDeviceTest, | |
951 MAYBE_WebRtcLoopbackTimeWithoutSignalProcessing) { | |
952 #if defined(OS_WIN) | |
953 // This test hangs on WinXP: see http://crbug.com/318189. | |
954 if (base::win::GetVersion() <= base::win::VERSION_XP) { | |
955 LOG(WARNING) << "Test disabled due to the test hangs on WinXP."; | |
956 return; | |
957 } | |
958 #endif | |
959 int latency = RunWebRtcLoopbackTimeTest(audio_manager_.get(), false); | |
960 PrintPerfResultMs("webrtc_loopback_without_sigal_processing (100 packets)", | |
961 "t", latency); | |
962 } | |
963 | |
964 #if defined(OS_LINUX) && !defined(OS_CHROMEOS) && defined(ARCH_CPU_ARM_FAMILY) | |
965 // Timing out on ARM linux bot: http://crbug.com/238490 | |
966 #define MAYBE_WebRtcLoopbackTimeWithSignalProcessing \ | |
967 DISABLED_WebRtcLoopbackTimeWithSignalProcessing | |
968 #else | |
969 #define MAYBE_WebRtcLoopbackTimeWithSignalProcessing \ | |
970 WebRtcLoopbackTimeWithSignalProcessing | |
971 #endif | |
972 | |
973 TEST_F(MAYBE_WebRTCAudioDeviceTest, | |
974 MAYBE_WebRtcLoopbackTimeWithSignalProcessing) { | |
975 #if defined(OS_WIN) | |
976 // This test hangs on WinXP: see http://crbug.com/318189. | |
977 if (base::win::GetVersion() <= base::win::VERSION_XP) { | |
978 LOG(WARNING) << "Test disabled due to the test hangs on WinXP."; | |
979 return; | |
980 } | |
981 #endif | |
982 int latency = RunWebRtcLoopbackTimeTest(audio_manager_.get(), true); | |
983 PrintPerfResultMs("webrtc_loopback_with_signal_processing (100 packets)", | |
984 "t", latency); | |
985 } | |
986 | |
987 } // namespace content | |
OLD | NEW |