OLD | NEW |
---|---|
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | |
wjia(left Chromium)
2013/08/28 22:23:16
2013.
henrika (OOO until Aug 14)
2013/08/29 14:13:59
Done.
| |
2 // Use of this source code is governed by a BSD-style license that can be | |
3 // found in the LICENSE file. | |
4 | |
5 #include "base/basictypes.h" | |
6 #include "base/file_util.h" | |
7 #include "base/memory/scoped_ptr.h" | |
8 #include "base/message_loop/message_loop.h" | |
9 #include "base/path_service.h" | |
10 #include "base/strings/stringprintf.h" | |
11 #include "base/synchronization/lock.h" | |
12 #include "base/synchronization/waitable_event.h" | |
13 #include "base/test/test_timeouts.h" | |
14 #include "base/time/time.h" | |
15 #include "build/build_config.h" | |
16 #include "media/audio/android/audio_manager_android.h" | |
17 #include "media/audio/audio_io.h" | |
18 #include "media/audio/audio_manager_base.h" | |
19 #include "media/base/decoder_buffer.h" | |
20 #include "media/base/seekable_buffer.h" | |
21 #include "media/base/test_data_util.h" | |
22 #include "testing/gtest/include/gtest/gtest.h" | |
23 | |
24 namespace media { | |
25 | |
26 static const char kSpeechFile_16b_s_48k[] = "speech_16b_stereo_48kHz.raw"; | |
27 static const char kSpeechFile_16b_m_48k[] = "speech_16b_mono_48kHz.raw"; | |
28 static const char kSpeechFile_16b_s_44k[] = "speech_16b_stereo_44kHz.raw"; | |
29 static const char kSpeechFile_16b_m_44k[] = "speech_16b_mono_44kHz.raw"; | |
30 | |
31 static const int kBitsPerSample = 16; | |
32 | |
 33 // TODO(henrika): add comments... | |
34 class MockAudioInputOutputCallbacks | |
35 : public AudioInputStream::AudioInputCallback, | |
36 public AudioOutputStream::AudioSourceCallback { | |
37 public: | |
38 MockAudioInputOutputCallbacks() | |
39 : input_callbacks_(0), | |
40 output_callbacks_(0), | |
41 input_callback_limit_(-1), | |
42 output_callback_limit_(-1), | |
43 input_errors_(0), | |
44 output_errors_(0) {}; | |
45 virtual ~MockAudioInputOutputCallbacks() {}; | |
46 | |
47 // Implementation of AudioInputCallback. | |
48 virtual void OnData(AudioInputStream* stream, const uint8* src, | |
49 uint32 size, uint32 hardware_delay_bytes, | |
50 double volume) OVERRIDE { | |
51 // DVLOG(1) << "+++ OnData +++"; | |
52 // int thread_id = static_cast<int>(base::PlatformThread::CurrentId()); | |
53 // DVLOG(1) << "##" << thread_id; | |
wjia(left Chromium)
2013/08/28 22:23:16
please remove unused code before checking in.
henrika (OOO until Aug 14)
2013/08/29 14:13:59
Done.
| |
54 | |
55 if (input_callbacks_ == 0) | |
56 input_start_time_ = base::TimeTicks::Now(); | |
57 | |
58 input_callbacks_++; | |
59 | |
60 if (input_callback_limit_ > 0 && | |
61 input_callbacks_ == input_callback_limit_) { | |
62 input_end_time_ = base::TimeTicks::Now(); | |
63 input_event_->Signal(); | |
64 } | |
65 }; | |
66 virtual void OnClose(AudioInputStream* stream) OVERRIDE {} | |
67 virtual void OnError(AudioInputStream* stream) OVERRIDE { | |
68 input_errors_++; | |
69 } | |
70 | |
 71   // Implementation of AudioSourceCallback. | |
72 virtual int OnMoreData(AudioBus* dest, | |
73 AudioBuffersState buffers_state) OVERRIDE { | |
74 // DVLOG(1) << "--- OnMoreData ---"; | |
75 if (output_callbacks_ == 0) | |
76 output_start_time_ = base::TimeTicks::Now(); | |
77 | |
78 output_callbacks_++; | |
79 | |
80 if (output_callback_limit_ > 0 && | |
81 output_callbacks_ == output_callback_limit_) { | |
82 output_end_time_ = base::TimeTicks::Now(); | |
83 output_event_->Signal(); | |
84 } | |
wjia(left Chromium)
2013/08/28 22:23:16
It seems that these lines of code (line 75 through
henrika (OOO until Aug 14)
2013/08/29 14:13:59
I rewrote by creating arrays of size 2, an enumerator ...
| |
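For reference, a minimal sketch of what the "arrays of size 2 indexed by an enumerator" rewrite mentioned in the reply above could look like. Every identifier below (StreamDirection, OnCallback, the counter arrays) is an assumption for illustration, not the code that actually landed; it relies on the base/ headers already included in this file.

```cpp
// Sketch only: shared bookkeeping for input and output callbacks, indexed by
// an enumerator, so OnData() and OnMoreData() no longer duplicate the logic.
class MockAudioIOCallbackCounter {
 public:
  enum StreamDirection { INPUT = 0, OUTPUT = 1, NUM_DIRECTIONS = 2 };

  MockAudioIOCallbackCounter() {
    for (int i = 0; i < NUM_DIRECTIONS; ++i) {
      callbacks_[i] = 0;
      callback_limit_[i] = -1;
      event_[i] = NULL;
    }
  }

  // Called from OnData() with INPUT and from OnMoreData() with OUTPUT.
  void OnCallback(StreamDirection dir) {
    if (callbacks_[dir] == 0)
      start_time_[dir] = base::TimeTicks::Now();
    ++callbacks_[dir];
    if (callback_limit_[dir] > 0 && callbacks_[dir] == callback_limit_[dir]) {
      end_time_[dir] = base::TimeTicks::Now();
      event_[dir]->Signal();
    }
  }

 private:
  int callbacks_[NUM_DIRECTIONS];
  int callback_limit_[NUM_DIRECTIONS];
  base::TimeTicks start_time_[NUM_DIRECTIONS];
  base::TimeTicks end_time_[NUM_DIRECTIONS];
  base::WaitableEvent* event_[NUM_DIRECTIONS];
};
```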
85 | |
86 dest->Zero(); | |
87 return dest->frames(); | |
88 } | |
89 | |
90 virtual int OnMoreIOData(AudioBus* source, | |
91 AudioBus* dest, | |
92 AudioBuffersState buffers_state) { | |
93 NOTREACHED(); | |
94 return 0; | |
95 } | |
96 | |
97 virtual void OnError(AudioOutputStream* stream) OVERRIDE { | |
98 output_errors_++; | |
99 } | |
100 | |
101 int input_callbacks() { return input_callbacks_; } | |
102 void set_input_callback_limit(base::WaitableEvent* event, | |
103 int input_callback_limit) { | |
104 input_event_ = event; | |
105 input_callback_limit_ = input_callback_limit; | |
106 } | |
107 int input_errors() { return input_errors_; } | |
108 base::TimeTicks input_start_time() { return input_start_time_; } | |
109 base::TimeTicks input_end_time() { return input_end_time_; } | |
110 | |
111 int output_callbacks() { return output_callbacks_; } | |
112 void set_output_callback_limit(base::WaitableEvent* event, | |
113 int output_callback_limit) { | |
114 output_event_ = event; | |
115 output_callback_limit_ = output_callback_limit; | |
116 } | |
117 int output_errors() { return output_errors_; } | |
118 base::TimeTicks output_start_time() { return output_start_time_; } | |
119 base::TimeTicks output_end_time() { return output_end_time_; } | |
120 | |
121 private: | |
122 int input_callbacks_; | |
123 int output_callbacks_; | |
124 int input_callback_limit_; | |
125 int output_callback_limit_; | |
126 int input_errors_; | |
127 int output_errors_; | |
128 base::TimeTicks input_start_time_; | |
129 base::TimeTicks output_start_time_; | |
130 base::TimeTicks input_end_time_; | |
131 base::TimeTicks output_end_time_; | |
132 base::WaitableEvent* input_event_; | |
133 base::WaitableEvent* output_event_; | |
134 | |
135 DISALLOW_COPY_AND_ASSIGN(MockAudioInputOutputCallbacks); | |
136 }; | |
137 | |
138 // Implements AudioOutputStream::AudioSourceCallback and provides audio data | |
139 // by reading from a data file. | |
140 class FileAudioSource : public AudioOutputStream::AudioSourceCallback { | |
141 public: | |
142 explicit FileAudioSource(base::WaitableEvent* event, const std::string& name) | |
143 : event_(event), | |
144 pos_(0), | |
145 previous_marker_time_(base::TimeTicks::Now()) { | |
146 // Reads a test file from media/test/data directory and stores it in | |
147 // a DecoderBuffer. | |
148 file_ = ReadTestDataFile(name); | |
149 | |
150 // Log the name of the file which is used as input for this test. | |
151 base::FilePath file_path = GetTestDataFilePath(name); | |
152 printf("Reading from file: %s\n", file_path.value().c_str()); | |
153 fflush(stdout); | |
154 } | |
155 | |
156 virtual ~FileAudioSource() {} | |
157 | |
158 // AudioOutputStream::AudioSourceCallback implementation. | |
159 | |
160 // Use samples read from a data file and fill up the audio buffer | |
161 // provided to us in the callback. | |
162 virtual int OnMoreData(AudioBus* audio_bus, | |
163 AudioBuffersState buffers_state) { | |
164 // Add a '.'-marker once every second. | |
165 const base::TimeTicks now_time = base::TimeTicks::Now(); | |
166 const int diff = (now_time - previous_marker_time_).InMilliseconds(); | |
167 if (diff > 1000) { | |
168 printf("."); | |
169 fflush(stdout); | |
170 previous_marker_time_ = now_time; | |
171 } | |
wjia(left Chromium)
2013/08/28 22:23:16
This won't work well when tests are run in parallel ...
henrika (OOO until Aug 14)
2013/08/29 14:13:59
Can you elaborate? Not sure if I understand.
Not
| |
172 | |
173 int max_size = | |
174 audio_bus->frames() * audio_bus->channels() * kBitsPerSample / 8; | |
wjia(left Chromium)
2013/08/28 22:23:16
nit: indent by 4.
henrika (OOO until Aug 14)
2013/08/29 14:13:59
Done.
| |
175 | |
176 bool stop_playing = false; | |
177 | |
178 // Adjust data size and prepare for end signal if file has ended. | |
179 if (pos_ + static_cast<int>(max_size) > file_size()) { | |
wjia(left Chromium)
2013/08/28 22:23:16
|max_size| is "int".
henrika (OOO until Aug 14)
2013/08/29 14:13:59
Done.
| |
180 stop_playing = true; | |
181 max_size = file_size() - pos_; | |
182 } | |
183 | |
184 // File data is stored as interleaved 16-bit values. Copy data samples from | |
185 // the file and deinterleave to match the audio bus format. | |
186 // FromInterleaved() will zero out any unfilled frames when there is not | |
187 // sufficient data remaining in the file to fill up the complete frame. | |
188 int frames = max_size / (audio_bus->channels() * kBitsPerSample / 8); | |
189 if (max_size) { | |
190 audio_bus->FromInterleaved( | |
191 file_->data() + pos_, frames, kBitsPerSample / 8); | |
192 pos_ += max_size; | |
193 } | |
194 | |
195 // Set event to ensure that the test can stop when the file has ended. | |
196 if (stop_playing) | |
197 event_->Signal(); | |
198 | |
199 return frames; | |
200 } | |
201 | |
202 virtual int OnMoreIOData(AudioBus* source, | |
203 AudioBus* dest, | |
204 AudioBuffersState buffers_state) OVERRIDE { | |
205 NOTREACHED(); | |
206 return 0; | |
207 } | |
208 | |
209 virtual void OnError(AudioOutputStream* stream) {} | |
210 | |
211 int file_size() { return file_->data_size(); } | |
212 | |
213 private: | |
214 base::WaitableEvent* event_; | |
215 int pos_; | |
216 scoped_refptr<DecoderBuffer> file_; | |
217 base::TimeTicks previous_marker_time_; | |
218 | |
219 DISALLOW_COPY_AND_ASSIGN(FileAudioSource); | |
220 }; | |
221 | |
222 // Implements AudioInputStream::AudioInputCallback and writes the recorded | |
223 // audio data to a local output file. | |
224 class FileAudioSink : public AudioInputStream::AudioInputCallback { | |
225 public: | |
226 explicit FileAudioSink(base::WaitableEvent* event, | |
227 const AudioParameters& params, | |
228 const std::string& file_name) | |
229 : event_(event), | |
230 params_(params), | |
231 previous_marker_time_(base::TimeTicks::Now()) { | |
232 // Allocate space for ~10 seconds of data. | |
233 const int kMaxBufferSize = 10 * params.GetBytesPerSecond(); | |
234 buffer_.reset(new media::SeekableBuffer(0, kMaxBufferSize)); | |
235 | |
236 // Open up the binary file which will be written to in the destructor. | |
237 base::FilePath file_path; | |
238 EXPECT_TRUE(PathService::Get(base::DIR_SOURCE_ROOT, &file_path)); | |
239 file_path = file_path.AppendASCII(file_name.c_str()); | |
240 binary_file_ = file_util::OpenFile(file_path, "wb"); | |
241 DLOG_IF(ERROR, !binary_file_) << "Failed to open binary PCM data file."; | |
242 printf("Writing to file : %s ", file_path.value().c_str()); | |
243 printf("of size %d bytes\n", buffer_->forward_capacity()); | |
244 fflush(stdout); | |
245 } | |
246 | |
247 virtual ~FileAudioSink() { | |
248 int bytes_written = 0; | |
249 while (bytes_written < buffer_->forward_capacity()) { | |
250 const uint8* chunk; | |
251 int chunk_size; | |
252 | |
253 // Stop writing if no more data is available. | |
254 if (!buffer_->GetCurrentChunk(&chunk, &chunk_size)) | |
255 break; | |
256 | |
257 // Write recorded data chunk to the file and prepare for next chunk. | |
258 fwrite(chunk, 1, chunk_size, binary_file_); | |
259 buffer_->Seek(chunk_size); | |
260 bytes_written += chunk_size; | |
261 } | |
262 file_util::CloseFile(binary_file_); | |
263 } | |
264 | |
265 // AudioInputStream::AudioInputCallback implementation. | |
266 virtual void OnData(AudioInputStream* stream, | |
267 const uint8* src, | |
268 uint32 size, | |
269 uint32 hardware_delay_bytes, | |
270 double volume) { | |
271 // Add a '.'-marker once every second. | |
272 const base::TimeTicks now_time = base::TimeTicks::Now(); | |
273 const int diff = (now_time - previous_marker_time_).InMilliseconds(); | |
274 if (diff > 1000) { | |
275 printf("."); | |
276 fflush(stdout); | |
277 previous_marker_time_ = now_time; | |
278 } | |
279 | |
280     // Store the data in a temporary buffer to avoid making blocking | |
281 // fwrite() calls in the audio callback. The complete buffer will be | |
282 // written to file in the destructor. | |
283 if (!buffer_->Append(src, size)) | |
284 event_->Signal(); | |
285 } | |
286 | |
287 virtual void OnClose(AudioInputStream* stream) {} | |
288 virtual void OnError(AudioInputStream* stream) {} | |
289 | |
290 private: | |
291 base::WaitableEvent* event_; | |
292 AudioParameters params_; | |
293 scoped_ptr<media::SeekableBuffer> buffer_; | |
294 FILE* binary_file_; | |
295 base::TimeTicks previous_marker_time_; | |
296 | |
297 DISALLOW_COPY_AND_ASSIGN(FileAudioSink); | |
298 }; | |
299 | |
300 // Implements AudioInputCallback and AudioSourceCallback to support full | |
301 // duplex audio where captured samples are played out in loopback after | |
302 // reading from a temporary FIFO storage. | |
303 class FullDuplexAudioSinkSource | |
304 : public AudioInputStream::AudioInputCallback, | |
305 public AudioOutputStream::AudioSourceCallback { | |
306 public: | |
307 explicit FullDuplexAudioSinkSource(const AudioParameters& params) | |
308 : params_(params), | |
309 previous_marker_time_(base::TimeTicks::Now()), | |
310 started_(false) { | |
311 // Start with a reasonably small FIFO size. It will be increased | |
312 // dynamically during the test if required. | |
313 fifo_.reset( | |
314 new media::SeekableBuffer(0, 2 * params.GetBytesPerBuffer())); | |
315 buffer_.reset(new uint8[params_.GetBytesPerBuffer()]); | |
316 } | |
317 | |
318 virtual ~FullDuplexAudioSinkSource() {} | |
319 | |
320 // AudioInputStream::AudioInputCallback implementation | |
321 virtual void OnData(AudioInputStream* stream, const uint8* src, | |
322 uint32 size, uint32 hardware_delay_bytes, | |
323 double volume) OVERRIDE { | |
324 // Add a '.'-marker once every second. | |
325 const base::TimeTicks now_time = base::TimeTicks::Now(); | |
326 const int diff = (now_time - previous_marker_time_).InMilliseconds(); | |
327 | |
328 base::AutoLock lock(lock_); | |
329 if (diff > 1000) { | |
330 started_ = true; | |
331 printf("."); | |
332 fflush(stdout); | |
333 previous_marker_time_ = now_time; | |
334 } | |
335 | |
336     // We add an initial delay of ~1 second before loopback starts to ensure | |
337     // a stable callback sequence and to avoid initial bursts which might add | |
338     // to the extra FIFO delay. | |
339 if (!started_) | |
340 return; | |
341 | |
342 if (!fifo_->Append(src, size)) { | |
343 fifo_->set_forward_capacity(2 * fifo_->forward_capacity()); | |
344 } | |
345 } | |
346 | |
347 virtual void OnClose(AudioInputStream* stream) OVERRIDE {} | |
348 virtual void OnError(AudioInputStream* stream) OVERRIDE {} | |
349 | |
350 // AudioOutputStream::AudioSourceCallback implementation | |
351 virtual int OnMoreData(AudioBus* dest, | |
352 AudioBuffersState buffers_state) OVERRIDE { | |
353 const int size_in_bytes = | |
354 (kBitsPerSample / 8) * dest->frames() * dest->channels(); | |
wjia(left Chromium)
2013/08/28 22:23:16
nit: indent by 4.
wjia(left Chromium)
2013/08/28 22:23:16
Do you need kBitsPerSample here? params_ has bits_per_sample().
henrika (OOO until Aug 14)
2013/08/29 14:13:59
Done.
henrika (OOO until Aug 14)
2013/08/29 14:13:59
Done.
| |
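For reference, a sketch of what the change asked for above could look like, here and at the FromInterleaved() call a few lines further down. It assumes params_.bits_per_sample() is 16 for the streams this test opens; it is an illustration, not the landed patch.

```cpp
// Sketch only: derive the sample size from params_ instead of the file-scope
// kBitsPerSample constant (assumes the test always configures 16-bit audio).
const int bytes_per_sample = params_.bits_per_sample() / 8;
const int size_in_bytes = bytes_per_sample * dest->frames() * dest->channels();
EXPECT_EQ(size_in_bytes, params_.GetBytesPerBuffer());
// ...and at the deinterleave step below:
dest->FromInterleaved(buffer_.get(), dest->frames(), bytes_per_sample);
```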
355 EXPECT_EQ(size_in_bytes, params_.GetBytesPerBuffer()); | |
356 | |
357 base::AutoLock lock(lock_); | |
358 | |
359     // We add an initial delay of ~1 second before loopback starts to ensure | |
360     // a stable callback sequence and to avoid initial bursts which might add | |
361     // to the extra FIFO delay. | |
362 if (!started_) { | |
363 dest->Zero(); | |
364 return dest->frames(); | |
365 } | |
366 | |
367     // Fill the destination with zeros if the FIFO does not contain enough | |
368 // data to fulfill the request. | |
369 if (fifo_->forward_bytes() < size_in_bytes) { | |
370 dest->Zero(); | |
371 } else { | |
372 fifo_->Read(buffer_.get(), size_in_bytes); | |
373 dest->FromInterleaved( | |
374 buffer_.get(), dest->frames(), kBitsPerSample / 8); | |
wjia(left Chromium)
2013/08/28 22:23:16
ditto for kBitsPerSample.
henrika (OOO until Aug 14)
2013/08/29 14:13:59
Done.
| |
375 } | |
376 | |
377 return dest->frames(); | |
378 } | |
379 virtual int OnMoreIOData(AudioBus* source, | |
380 AudioBus* dest, | |
381 AudioBuffersState buffers_state) OVERRIDE { | |
382 NOTREACHED(); | |
383 return 0; | |
384 } | |
385 virtual void OnError(AudioOutputStream* stream) OVERRIDE {} | |
386 | |
387 private: | |
388 // Converts from bytes to milliseconds given number of bytes and existing | |
389 // audio parameters. | |
390 double BytesToMilliseconds(int bytes) const { | |
391 const int frames = bytes / params_.GetBytesPerFrame(); | |
392 return (base::TimeDelta::FromMicroseconds( | |
393 frames * base::Time::kMicrosecondsPerSecond / | |
394 static_cast<float>(params_.sample_rate()))).InMillisecondsF(); | |
395 } | |
396 | |
397 AudioParameters params_; | |
398 base::TimeTicks previous_marker_time_; | |
399 base::Lock lock_; | |
400 scoped_ptr<media::SeekableBuffer> fifo_; | |
401 scoped_ptr<uint8[]> buffer_; | |
402 bool started_; | |
403 | |
404 DISALLOW_COPY_AND_ASSIGN(FullDuplexAudioSinkSource); | |
405 }; | |
406 | |
407 // Test fixture class. | |
408 class AudioAndroidTest : public testing::Test { | |
409 public: | |
410 AudioAndroidTest() | |
411 : audio_manager_(AudioManager::Create()) {} | |
412 | |
413 virtual ~AudioAndroidTest() {} | |
414 | |
415 AudioManager* audio_manager() { return audio_manager_.get(); } | |
416 | |
417 // Convenience method which ensures that we are not running on the build | |
418 // bots and that at least one valid input and output device can be found. | |
419 bool CanRunAudioTests() { | |
420 bool input = audio_manager()->HasAudioInputDevices(); | |
421 bool output = audio_manager()->HasAudioOutputDevices(); | |
422 LOG_IF(WARNING, !input) << "No input device detected."; | |
423 LOG_IF(WARNING, !output) << "No output device detected."; | |
424 return input && output; | |
wjia(left Chromium)
2013/08/28 22:23:16
I am not sure if I understand the logic here. This
henrika (OOO until Aug 14)
2013/08/29 14:13:59
Really good point. Did not think of that. Makes sense ...
| |
425 } | |
426 | |
427 // Converts AudioParameters::Format enumerator to readable string. | |
428 std::string FormatToString(AudioParameters::Format format) { | |
429 if (format == AudioParameters::AUDIO_PCM_LINEAR) | |
430 return std::string("AUDIO_PCM_LINEAR"); | |
431 else if (format == AudioParameters::AUDIO_PCM_LOW_LATENCY) | |
432       return std::string("AUDIO_PCM_LOW_LATENCY"); | |
433 else if (format == AudioParameters::AUDIO_FAKE) | |
434 return std::string("AUDIO_FAKE"); | |
435 else if (format == AudioParameters::AUDIO_LAST_FORMAT) | |
436 return std::string("AUDIO_LAST_FORMAT"); | |
437 else | |
438 return std::string(); | |
wjia(left Chromium)
2013/08/28 22:23:16
use switch?
henrika (OOO until Aug 14)
2013/08/29 14:13:59
Done.
| |
439 } | |
440 | |
441 // Converts ChannelLayout enumerator to readable string. Does not include | |
442 // multi-channel cases since these layouts are not supported on Android. | |
443 std::string ChannelLayoutToString(ChannelLayout channel_layout) { | |
444 if (channel_layout == CHANNEL_LAYOUT_NONE) | |
445 return std::string("CHANNEL_LAYOUT_NONE"); | |
446 else if (channel_layout == CHANNEL_LAYOUT_UNSUPPORTED) | |
447 return std::string("CHANNEL_LAYOUT_UNSUPPORTED"); | |
448 else if (channel_layout == CHANNEL_LAYOUT_MONO) | |
449 return std::string("CHANNEL_LAYOUT_MONO"); | |
450 else if (channel_layout == CHANNEL_LAYOUT_STEREO) | |
451 return std::string("CHANNEL_LAYOUT_STEREO"); | |
452 else | |
453 return std::string("CHANNEL_LAYOUT_UNSUPPORTED"); | |
wjia(left Chromium)
2013/08/28 22:23:16
ditto.
henrika (OOO until Aug 14)
2013/08/29 14:13:59
Done.
| |
454 } | |
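For reference, a sketch of the switch-based form requested in the two "use switch?" comments above (FormatToString shown; ChannelLayoutToString can follow the same pattern). This is an assumption about the eventual shape, not the code that landed:

```cpp
// Sketch only: switch-based variant of FormatToString(). The default branch
// keeps the original "unknown format maps to an empty string" behavior.
std::string FormatToString(AudioParameters::Format format) {
  switch (format) {
    case AudioParameters::AUDIO_PCM_LINEAR:
      return "AUDIO_PCM_LINEAR";
    case AudioParameters::AUDIO_PCM_LOW_LATENCY:
      return "AUDIO_PCM_LOW_LATENCY";
    case AudioParameters::AUDIO_FAKE:
      return "AUDIO_FAKE";
    case AudioParameters::AUDIO_LAST_FORMAT:
      return "AUDIO_LAST_FORMAT";
    default:
      return std::string();
  }
}
```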
455 | |
456 void PrintAudioParameters(AudioParameters params) { | |
457 printf("format : %s\n", FormatToString(params.format()).c_str()); | |
458 printf("channel_layout : %s\n", | |
459 ChannelLayoutToString(params.channel_layout()).c_str()); | |
460 printf("sample_rate : %d\n", params.sample_rate()); | |
461 printf("bits_per_sample : %d\n", params.bits_per_sample()); | |
462 printf("frames_per_buffer: %d\n", params.frames_per_buffer()); | |
463 printf("channels : %d\n", params.channels()); | |
464 printf("bytes per buffer : %d\n", params.GetBytesPerBuffer()); | |
465 printf("bytes per second : %d\n", params.GetBytesPerSecond()); | |
466 printf("bytes per frame : %d\n", params.GetBytesPerFrame()); | |
467 } | |
468 | |
469 AudioParameters GetDefaultInputStreamParameters() { | |
470 return audio_manager()->GetInputStreamParameters( | |
471 AudioManagerBase::kDefaultDeviceId); | |
472 } | |
473 | |
474 AudioParameters GetDefaultOutputStreamParameters() { | |
475 return audio_manager()->GetDefaultOutputStreamParameters(); | |
476 } | |
477 | |
478 double TimeBetweenCallbacks(AudioParameters params) const { | |
wjia(left Chromium)
2013/08/28 22:23:16
It's better to call this "ExpectedTimeBetweenCallbacks".
henrika (OOO until Aug 14)
2013/08/29 14:13:59
Done.
| |
479 return (base::TimeDelta::FromMicroseconds( | |
wjia(left Chromium)
2013/08/28 22:23:16
nit: indent.
henrika (OOO until Aug 14)
2013/08/29 14:13:59
Done.
| |
480 params.frames_per_buffer() * base::Time::kMicrosecondsPerSecond / | |
481 static_cast<float>(params.sample_rate()))).InMillisecondsF(); | |
482 } | |
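To make the expectations in StartInputStreamCallbacks() and StartOutputStreamCallbacks() below concrete, a worked example with illustrative values (no particular device guarantees these parameters):

```cpp
// Example: frames_per_buffer = 480 at sample_rate = 48000 Hz.
//   time_between_callbacks_ms = 480 * 1,000,000 us / 48000 = 10,000 us = 10 ms
//   num_callbacks             = 1000.0 / 10 ms              = 100
// i.e. the one-second window used by the tests expects roughly 100 callbacks.
```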
483 | |
484 #define START_STREAM_AND_WAIT_FOR_EVENT(stream) \ | |
485 EXPECT_TRUE(stream->Open()); \ | |
486 stream->Start(&io_callbacks_); \ | |
487 EXPECT_TRUE(event.TimedWait(TestTimeouts::action_timeout())); \ | |
488 stream->Stop(); \ | |
489 stream->Close() | |
490 | |
491 void StartInputStreamCallbacks(const AudioParameters& params) { | |
492 double time_between_callbacks_ms = TimeBetweenCallbacks(params); | |
493 const int num_callbacks = (1000.0 / time_between_callbacks_ms); | |
494 | |
495 base::WaitableEvent event(false, false); | |
496 io_callbacks_.set_input_callback_limit(&event, num_callbacks); | |
497 | |
498 AudioInputStream* ais = audio_manager()->MakeAudioInputStream( | |
499 params, AudioManagerBase::kDefaultDeviceId); | |
500 EXPECT_TRUE(ais); | |
501 START_STREAM_AND_WAIT_FOR_EVENT(ais); | |
502 | |
503 EXPECT_EQ(io_callbacks_.input_callbacks(), num_callbacks); | |
504 EXPECT_EQ(io_callbacks_.input_errors(), 0); | |
505 | |
506 double actual_time_between_callbacks_ms = ( | |
507 (io_callbacks_.input_end_time() - io_callbacks_.input_start_time()) / | |
508 (io_callbacks_.input_callbacks() - 1)).InMillisecondsF(); | |
509 printf("time between callbacks: %.2fms\n", time_between_callbacks_ms); | |
510 printf("actual time between callbacks: %.2fms\n", | |
511 actual_time_between_callbacks_ms); | |
512 EXPECT_GE(actual_time_between_callbacks_ms, | |
513 0.75 * time_between_callbacks_ms); | |
514 EXPECT_LE(actual_time_between_callbacks_ms, | |
515 1.25 * time_between_callbacks_ms); | |
wjia(left Chromium)
2013/08/28 22:23:16
Is 25% margin good enough for one second of audio input ...
henrika (OOO until Aug 14)
2013/08/29 14:13:59
Good point. So far so good but I've been close on
| |
516 } | |
517 | |
518 void StartOutputStreamCallbacks(const AudioParameters& params) { | |
519 double time_between_callbacks_ms = TimeBetweenCallbacks(params); | |
520 const int num_callbacks = (1000.0 / time_between_callbacks_ms); | |
521 | |
522 base::WaitableEvent event(false, false); | |
523 io_callbacks_.set_output_callback_limit(&event, num_callbacks); | |
524 AudioOutputStream* aos = audio_manager()->MakeAudioOutputStream( | |
525 params, std::string()); | |
526 EXPECT_TRUE(aos); | |
527 START_STREAM_AND_WAIT_FOR_EVENT(aos); | |
528 | |
529 EXPECT_EQ(io_callbacks_.output_callbacks(), num_callbacks); | |
530 EXPECT_EQ(io_callbacks_.output_errors(), 0); | |
531 | |
532 double actual_time_between_callbacks_ms = ( | |
533 (io_callbacks_.output_end_time() - io_callbacks_.output_start_time()) / | |
534 (io_callbacks_.output_callbacks() - 1)).InMillisecondsF(); | |
535 printf("time between callbacks: %.2fms\n", time_between_callbacks_ms); | |
536 printf("actual time between callbacks: %.2fms\n", | |
537 actual_time_between_callbacks_ms); | |
538 EXPECT_GE(actual_time_between_callbacks_ms, | |
539 0.75 * time_between_callbacks_ms); | |
540 EXPECT_LE(actual_time_between_callbacks_ms, | |
541 1.25 * time_between_callbacks_ms); | |
542 } | |
543 | |
544 protected: | |
545 base::MessageLoopForUI message_loop_; | |
546 scoped_ptr<AudioManager> audio_manager_; | |
547 MockAudioInputOutputCallbacks io_callbacks_; | |
548 | |
549 DISALLOW_COPY_AND_ASSIGN(AudioAndroidTest); | |
550 }; | |
551 | |
552 // Get the default audio input parameters and log the result. | |
553 TEST_F(AudioAndroidTest, GetInputStreamParameters) { | |
554 if (!CanRunAudioTests()) | |
555 return; | |
556 AudioParameters params = GetDefaultInputStreamParameters(); | |
557 EXPECT_TRUE(params.IsValid()); | |
558 PrintAudioParameters(params); | |
559 } | |
560 | |
561 // Get the default audio output parameters and log the result. | |
562 TEST_F(AudioAndroidTest, GetDefaultOutputStreamParameters) { | |
563 if (!CanRunAudioTests()) | |
564 return; | |
565 AudioParameters params = GetDefaultOutputStreamParameters(); | |
566 EXPECT_TRUE(params.IsValid()); | |
567 PrintAudioParameters(params); | |
568 } | |
569 | |
570 // Check if low-latency output is supported and log the result. | |
571 TEST_F(AudioAndroidTest, IsAudioLowLatencySupported) { | |
572 if (!CanRunAudioTests()) | |
573 return; | |
574 AudioManagerAndroid* manager = | |
575 static_cast<AudioManagerAndroid*>(audio_manager()); | |
576 bool low_latency = manager->IsAudioLowLatencySupported(); | |
577 low_latency ? printf("Low latency output is supported\n") : | |
578 printf("Low latency output is *not* supported\n"); | |
579 } | |
580 | |
581 // Ensure that a default input stream can be created and closed. | |
582 TEST_F(AudioAndroidTest, CreateAndCloseInputStream) { | |
583 if (!CanRunAudioTests()) | |
584 return; | |
585 AudioParameters params = GetDefaultInputStreamParameters(); | |
586 AudioInputStream* ais = audio_manager()->MakeAudioInputStream( | |
587 params, AudioManagerBase::kDefaultDeviceId); | |
588 EXPECT_TRUE(ais); | |
589 ais->Close(); | |
590 } | |
591 | |
592 // Ensure that a default output stream can be created and closed. | |
593 // TODO(henrika): should we also verify that this API changes the audio mode | |
594 // to communication mode, and calls RegisterHeadsetReceiver, the first time | |
595 // it is called? | |
596 TEST_F(AudioAndroidTest, CreateAndCloseOutputStream) { | |
597 if (!CanRunAudioTests()) | |
598 return; | |
599 AudioParameters params = GetDefaultOutputStreamParameters(); | |
600 AudioOutputStream* aos = audio_manager()->MakeAudioOutputStream( | |
601 params, std::string()); | |
602 EXPECT_TRUE(aos); | |
603 aos->Close(); | |
604 } | |
605 | |
606 // Ensure that a default input stream can be opened and closed. | |
607 TEST_F(AudioAndroidTest, OpenAndCloseInputStream) { | |
608 if (!CanRunAudioTests()) | |
609 return; | |
610 AudioParameters params = GetDefaultInputStreamParameters(); | |
611 AudioInputStream* ais = audio_manager()->MakeAudioInputStream( | |
612 params, AudioManagerBase::kDefaultDeviceId); | |
613 EXPECT_TRUE(ais); | |
614 EXPECT_TRUE(ais->Open()); | |
615 ais->Close(); | |
616 } | |
617 | |
618 // Ensure that a default output stream can be opened and closed. | |
619 TEST_F(AudioAndroidTest, OpenAndCloseOutputStream) { | |
620 if (!CanRunAudioTests()) | |
621 return; | |
622 AudioParameters params = GetDefaultOutputStreamParameters(); | |
623 AudioOutputStream* aos = audio_manager()->MakeAudioOutputStream( | |
624 params, std::string()); | |
625 EXPECT_TRUE(aos); | |
626 EXPECT_TRUE(aos->Open()); | |
627 aos->Close(); | |
628 } | |
629 | |
630 // Start input streaming using default input parameters and ensure that the | |
631 // callback sequence is sane. | |
632 TEST_F(AudioAndroidTest, StartInputStreamCallbacks) { | |
633 if (!CanRunAudioTests()) | |
634 return; | |
635 AudioParameters params = GetDefaultInputStreamParameters(); | |
636 StartInputStreamCallbacks(params); | |
637 } | |
638 | |
639 // Start input streaming using non-default input parameters and ensure that the | |
640 // callback sequence is sane. The only change we make in this test is to select | |
641 // a 10ms buffer size instead of the default size. | |
642 // TODO(henrika): possibly add support for more variations. | |
643 TEST_F(AudioAndroidTest, StartInputStreamCallbacksNonDefaultParameters) { | |
644 if (!CanRunAudioTests()) | |
645 return; | |
646 AudioParameters native_params = GetDefaultInputStreamParameters(); | |
647 AudioParameters params(native_params.format(), | |
648 native_params.channel_layout(), | |
649 native_params.sample_rate(), | |
650 native_params.bits_per_sample(), | |
651 native_params.sample_rate() / 100); | |
652 StartInputStreamCallbacks(params); | |
653 } | |
654 | |
655 // Start output streaming using default output parameters and ensure that the | |
656 // callback sequence is sane. | |
657 TEST_F(AudioAndroidTest, StartOutputStreamCallbacks) { | |
658 if (!CanRunAudioTests()) | |
659 return; | |
660 AudioParameters params = GetDefaultOutputStreamParameters(); | |
661 StartOutputStreamCallbacks(params); | |
662 } | |
663 | |
664 // Start output streaming using non-default output parameters and ensure that | |
665 // the callback sequence is sane. The only change we make in this test is to | |
666 // select a 10ms buffer size instead of the default size and to open up the | |
667 // device in mono. | |
668 // TODO(henrika): possibly add support for more variations. | |
669 TEST_F(AudioAndroidTest, StartOutputStreamCallbacksNonDefaultParameters) { | |
670 if (!CanRunAudioTests()) | |
671 return; | |
672 AudioParameters native_params = GetDefaultOutputStreamParameters(); | |
673 AudioParameters params(native_params.format(), | |
674 CHANNEL_LAYOUT_MONO, | |
675 native_params.sample_rate(), | |
676 native_params.bits_per_sample(), | |
677 native_params.sample_rate() / 100); | |
678 StartOutputStreamCallbacks(params); | |
679 } | |
680 | |
681 TEST_F(AudioAndroidTest, RunOutputStreamWithFileAsSource) { | |
682 if (!CanRunAudioTests()) | |
683 return; | |
684 | |
685 AudioParameters params = GetDefaultOutputStreamParameters(); | |
686 AudioOutputStream* aos = audio_manager()->MakeAudioOutputStream( | |
687 params, std::string()); | |
688 EXPECT_TRUE(aos); | |
689 | |
690 PrintAudioParameters(params); | |
691 fflush(stdout); | |
692 | |
693 std::string file_name; | |
694 if (params.sample_rate() == 48000 && params.channels() == 2) { | |
695 file_name = kSpeechFile_16b_s_48k; | |
696 } else if (params.sample_rate() == 48000 && params.channels() == 1) { | |
697 file_name = kSpeechFile_16b_m_48k; | |
698 } else if (params.sample_rate() == 44100 && params.channels() == 2) { | |
699 file_name = kSpeechFile_16b_s_44k; | |
700 } else if (params.sample_rate() == 44100 && params.channels() == 1) { | |
701 file_name = kSpeechFile_16b_m_44k; | |
702 } else { | |
703 FAIL() << "This test supports 44.1kHz and 48kHz mono/stereo only."; | |
704 return; | |
705 } | |
706 | |
707 base::WaitableEvent event(false, false); | |
708 FileAudioSource source(&event, file_name); | |
709 | |
710 EXPECT_TRUE(aos->Open()); | |
711 aos->SetVolume(1.0); | |
712 aos->Start(&source); | |
713 printf(">> Verify that file is played out correctly"); | |
714 fflush(stdout); | |
715 EXPECT_TRUE(event.TimedWait(TestTimeouts::action_max_timeout())); | |
716 printf("\n"); | |
717 aos->Stop(); | |
718 aos->Close(); | |
719 } | |
720 | |
721 // Start input streaming and run it for ten seconds while recording to a | |
722 // local audio file. | |
723 TEST_F(AudioAndroidTest, RunSimplexInputStreamWithFileAsSink) { | |
724 if (!CanRunAudioTests()) | |
725 return; | |
726 | |
727 AudioParameters params = GetDefaultInputStreamParameters(); | |
728 AudioInputStream* ais = audio_manager()->MakeAudioInputStream( | |
729 params, AudioManagerBase::kDefaultDeviceId); | |
730 EXPECT_TRUE(ais); | |
731 | |
732 PrintAudioParameters(params); | |
733 fflush(stdout); | |
734 | |
735 std::string file_name = base::StringPrintf("out_simplex_%d_%d_%d.pcm", | |
736 params.sample_rate(), params.frames_per_buffer(), params.channels()); | |
737 | |
738 base::WaitableEvent event(false, false); | |
739 FileAudioSink sink(&event, params, file_name); | |
740 | |
741 EXPECT_TRUE(ais->Open()); | |
742 ais->Start(&sink); | |
743 printf(">> Speak into the microphone to record audio"); | |
744 fflush(stdout); | |
745 EXPECT_TRUE(event.TimedWait(TestTimeouts::action_max_timeout())); | |
746 printf("\n"); | |
747 ais->Stop(); | |
748 ais->Close(); | |
749 } | |
750 | |
751 // Same test as RunSimplexInputStreamWithFileAsSink but this time output | |
752 // streaming is active as well (reads zeros only). | |
753 TEST_F(AudioAndroidTest, RunDuplexInputStreamWithFileAsSink) { | |
754 if (!CanRunAudioTests()) | |
755 return; | |
756 | |
757 AudioParameters in_params = GetDefaultInputStreamParameters(); | |
758 AudioInputStream* ais = audio_manager()->MakeAudioInputStream( | |
759 in_params, AudioManagerBase::kDefaultDeviceId); | |
760 EXPECT_TRUE(ais); | |
761 | |
762 PrintAudioParameters(in_params); | |
763 fflush(stdout); | |
764 | |
765 AudioParameters out_params = | |
766 audio_manager()->GetDefaultOutputStreamParameters(); | |
767 AudioOutputStream* aos = audio_manager()->MakeAudioOutputStream( | |
768 out_params, std::string()); | |
769 EXPECT_TRUE(aos); | |
770 | |
771 PrintAudioParameters(out_params); | |
772 fflush(stdout); | |
773 | |
774 std::string file_name = base::StringPrintf("out_duplex_%d_%d_%d.pcm", | |
775 in_params.sample_rate(), in_params.frames_per_buffer(), | |
776 in_params.channels()); | |
777 | |
778 base::WaitableEvent event(false, false); | |
779 FileAudioSink sink(&event, in_params, file_name); | |
780 | |
781 EXPECT_TRUE(ais->Open()); | |
782 EXPECT_TRUE(aos->Open()); | |
783 ais->Start(&sink); | |
784 aos->Start(&io_callbacks_); | |
785 printf(">> Speak into the microphone to record audio"); | |
786 fflush(stdout); | |
787 EXPECT_TRUE(event.TimedWait(TestTimeouts::action_max_timeout())); | |
788 printf("\n"); | |
789 aos->Stop(); | |
790 ais->Stop(); | |
791 aos->Close(); | |
792 ais->Close(); | |
793 } | |
794 | |
795 TEST_F(AudioAndroidTest, RunInputAndOutputStreamsInFullDuplex) { | |
796 if (!CanRunAudioTests()) | |
797 return; | |
798 | |
799 // Get native audio parameters for the input side. | |
800 AudioParameters default_input_params = GetDefaultInputStreamParameters(); | |
801 | |
802 // Modify the parameters so that both input and output can use the same | |
803 // parameters by selecting 10ms as buffer size. This will also ensure that | |
804 // the output stream will be a mono stream since mono is default for input | |
805 // audio on Android. | |
806 AudioParameters io_params(default_input_params.format(), | |
807 default_input_params.channel_layout(), | |
808 default_input_params.sample_rate(), | |
809 default_input_params.bits_per_sample(), | |
810 default_input_params.sample_rate() / 100); | |
811 | |
812 PrintAudioParameters(io_params); | |
813 fflush(stdout); | |
814 | |
815 // Create input and output streams using the common audio parameters. | |
816 AudioInputStream* ais = audio_manager()->MakeAudioInputStream( | |
817 io_params, AudioManagerBase::kDefaultDeviceId); | |
818 EXPECT_TRUE(ais); | |
819 AudioOutputStream* aos = audio_manager()->MakeAudioOutputStream( | |
820 io_params, std::string()); | |
821 EXPECT_TRUE(aos); | |
822 | |
823 FullDuplexAudioSinkSource full_duplex(io_params); | |
824 | |
825 EXPECT_TRUE(ais->Open()); | |
826 EXPECT_TRUE(aos->Open()); | |
827 ais->Start(&full_duplex); | |
828 aos->Start(&full_duplex); | |
829 printf(">> Speak into the microphone and listen to the audio in loopback"); | |
830 fflush(stdout); | |
831 base::PlatformThread::Sleep(base::TimeDelta::FromSeconds(10)); | |
832 printf("\n"); | |
833 aos->Stop(); | |
834 ais->Stop(); | |
835 aos->Close(); | |
836 ais->Close(); | |
837 } | |
838 | |
839 } // namespace media | |