| 1 // Copyright (c) 2013 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. |
| 4 |
| 5 #include "base/basictypes.h" |
| 6 #include "base/file_util.h" |
| 7 #include "base/memory/scoped_ptr.h" |
| 8 #include "base/message_loop/message_loop.h" |
| 9 #include "base/path_service.h" |
| 10 #include "base/strings/stringprintf.h" |
| 11 #include "base/synchronization/lock.h" |
| 12 #include "base/synchronization/waitable_event.h" |
| 13 #include "base/test/test_timeouts.h" |
| 14 #include "base/time/time.h" |
| 15 #include "build/build_config.h" |
| 16 #include "media/audio/android/audio_manager_android.h" |
| 17 #include "media/audio/audio_io.h" |
| 18 #include "media/audio/audio_manager_base.h" |
| 19 #include "media/base/decoder_buffer.h" |
| 20 #include "media/base/seekable_buffer.h" |
| 21 #include "media/base/test_data_util.h" |
| 22 #include "testing/gtest/include/gtest/gtest.h" |
| 23 |
| 24 namespace media { |
| 25 |
| 26 static const char kSpeechFile_16b_s_48k[] = "speech_16b_stereo_48kHz.raw"; |
| 27 static const char kSpeechFile_16b_m_48k[] = "speech_16b_mono_48kHz.raw"; |
| 28 static const char kSpeechFile_16b_s_44k[] = "speech_16b_stereo_44kHz.raw"; |
| 29 static const char kSpeechFile_16b_m_44k[] = "speech_16b_mono_44kHz.raw"; |
| 30 |
| 31 static const int kBitsPerSample = 16; |
| 32 static const int kBytesPerSample = kBitsPerSample / 8; |
| 33 |
| 34 // Converts an AudioParameters::Format enumerator to a readable string.
| 35 static std::string FormatToString(AudioParameters::Format format) { |
| 36 switch (format) { |
| 37 case AudioParameters::AUDIO_PCM_LINEAR: |
| 38 return std::string("AUDIO_PCM_LINEAR"); |
| 39 case AudioParameters::AUDIO_PCM_LOW_LATENCY: |
| 40 return std::string("AUDIO_PCM_LOW_LATENCY"); |
| 41 case AudioParameters::AUDIO_FAKE: |
| 42 return std::string("AUDIO_FAKE"); |
| 43 case AudioParameters::AUDIO_LAST_FORMAT: |
| 44 return std::string("AUDIO_LAST_FORMAT"); |
| 45 default: |
| 46 return std::string(); |
| 47 } |
| 48 } |
| 49 |
| 50 // Converts a ChannelLayout enumerator to a readable string. Does not include
| 51 // multi-channel cases since these layouts are not supported on Android. |
| 52 static std::string LayoutToString(ChannelLayout channel_layout) { |
| 53 switch (channel_layout) { |
| 54 case CHANNEL_LAYOUT_NONE: |
| 55 return std::string("CHANNEL_LAYOUT_NONE"); |
| 56 case CHANNEL_LAYOUT_UNSUPPORTED: |
| 57 return std::string("CHANNEL_LAYOUT_UNSUPPORTED"); |
| 58 case CHANNEL_LAYOUT_MONO: |
| 59 return std::string("CHANNEL_LAYOUT_MONO"); |
| 60 case CHANNEL_LAYOUT_STEREO: |
| 61 return std::string("CHANNEL_LAYOUT_STEREO"); |
| 62 default: |
| 63 return std::string("CHANNEL_LAYOUT_UNSUPPORTED"); |
| 64 } |
| 65 } |
| 66 |
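| // Returns the expected time between two consecutive audio callbacks, in
| // milliseconds, based on the buffer size and sample rate in |params|.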
| 67 static double ExpectedTimeBetweenCallbacks(AudioParameters params) { |
| 68 return (base::TimeDelta::FromMicroseconds( |
| 69 params.frames_per_buffer() * base::Time::kMicrosecondsPerSecond / |
| 70 static_cast<float>(params.sample_rate()))).InMillisecondsF(); |
| 71 } |
| 72 |
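| // Logs the given audio parameters in a human-readable, multi-line format.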
| 73 std::ostream& operator<<(std::ostream& os, const AudioParameters& params) { |
| 74 os << std::endl << "format: " << FormatToString(params.format()) << std::endl |
| 75 << "channel layout: " << LayoutToString(params.channel_layout()) |
| 76 << std::endl << "sample rate: " << params.sample_rate() << std::endl |
| 77 << "bits per sample: " << params.bits_per_sample() << std::endl |
| 78 << "frames per buffer: " << params.frames_per_buffer() << std::endl |
| 79 << "channels: " << params.channels() << std::endl |
| 80 << "bytes per buffer: " << params.GetBytesPerBuffer() << std::endl |
| 81 << "bytes per second: " << params.GetBytesPerSecond() << std::endl |
| 82 << "bytes per frame: " << params.GetBytesPerFrame() << std::endl |
| 83 << "frame size in ms: " << ExpectedTimeBetweenCallbacks(params); |
| 84 return os; |
| 85 } |
| 86 |
| 87 // Implements AudioInputCallback and AudioSourceCallback with some trivial |
| 88 // additional counting support to keep track of the number of callbacks, |
| 89 // number of error callbacks, etc. It also allows the user to set an expected
| 90 // number of callbacks, in either direction, before a provided event is signaled.
| 91 class MockAudioInputOutputCallbacks |
| 92 : public AudioInputStream::AudioInputCallback, |
| 93 public AudioOutputStream::AudioSourceCallback { |
| 94 public: |
| 95 MockAudioInputOutputCallbacks() { Reset(); }
| 97 virtual ~MockAudioInputOutputCallbacks() {}
| 98 |
| 99 // Implementation of AudioInputCallback. |
| 100 virtual void OnData(AudioInputStream* stream, |
| 101 const uint8* src, |
| 102 uint32 size, |
| 103 uint32 hardware_delay_bytes, |
| 104 double volume) OVERRIDE { |
| 105 UpdateCountersAndSignalWhenDone(kInput); |
| 106 } |
| 108 |
| 109 virtual void OnError(AudioInputStream* stream) OVERRIDE { errors_[kInput]++; } |
| 110 |
| 111 virtual void OnClose(AudioInputStream* stream) OVERRIDE {} |
| 112 |
| 113 // Implementation of AudioSourceCallback. |
| 114 virtual int OnMoreData(AudioBus* dest, |
| 115 AudioBuffersState buffers_state) OVERRIDE { |
| 116 UpdateCountersAndSignalWhenDone(kOutput); |
| 117 dest->Zero(); |
| 118 return dest->frames(); |
| 119 } |
| 120 |
| 121 virtual int OnMoreIOData(AudioBus* source, |
| 122 AudioBus* dest, |
| 123 AudioBuffersState buffers_state) OVERRIDE { |
| 124 NOTREACHED(); |
| 125 return 0; |
| 126 } |
| 127 |
| 128 virtual void OnError(AudioOutputStream* stream) OVERRIDE { |
| 129 errors_[kOutput]++; |
| 130 } |
| 131 |
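| // Resets the callback counters, callback limits and error counters for both
| // directions.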
| 132 void Reset() { |
| 133 for (int i = 0; i < 2; ++i) { |
| 134 callbacks_[i] = 0; |
| 135 callback_limit_[i] = -1; |
| 136 errors_[i] = 0; |
| 137 } |
| 138 } |
| 139 |
| 140 int input_callbacks() { return callbacks_[kInput]; } |
| 141 |
| 142 void set_input_callback_limit(base::WaitableEvent* event, |
| 143 int input_callback_limit) { |
| 144 event_[kInput] = event; |
| 145 callback_limit_[kInput] = input_callback_limit; |
| 146 } |
| 147 |
| 148 int input_errors() { return errors_[kInput]; } |
| 149 |
| 150 base::TimeTicks input_start_time() { return start_time_[kInput]; } |
| 151 |
| 152 base::TimeTicks input_end_time() { return end_time_[kInput]; } |
| 153 |
| 154 int output_callbacks() { return callbacks_[kOutput]; } |
| 155 |
| 156 void set_output_callback_limit(base::WaitableEvent* event, |
| 157 int output_callback_limit) { |
| 158 event_[kOutput] = event; |
| 159 callback_limit_[kOutput] = output_callback_limit; |
| 160 } |
| 161 |
| 162 int output_errors() { return errors_[kOutput]; } |
| 163 |
| 164 base::TimeTicks output_start_time() { return start_time_[kOutput]; } |
| 165 |
| 166 base::TimeTicks output_end_time() { return end_time_[kOutput]; } |
| 167 |
| 168 double average_time_between_input_callbacks_ms() { |
| 169 return ((input_end_time() - input_start_time()) / (input_callbacks() - 1)) |
| 170 .InMillisecondsF(); |
| 171 } |
| 172 |
| 173 double average_time_between_output_callbacks_ms() { |
| 174 return ((output_end_time() - output_start_time()) / |
| 175 (output_callbacks() - 1)).InMillisecondsF(); |
| 176 } |
| 177 |
| 178 private: |
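| // Stores the start time at the first callback in the given direction and
| // increments the callback counter. When a callback limit has been set and
| // is reached, the end time is stored and the registered event is signaled.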
| 179 void UpdateCountersAndSignalWhenDone(int dir) { |
| 180 if (callbacks_[dir] == 0) |
| 181 start_time_[dir] = base::TimeTicks::Now(); |
| 182 callbacks_[dir]++; |
| 183 if (callback_limit_[dir] > 0 && callbacks_[dir] == callback_limit_[dir]) { |
| 184 end_time_[dir] = base::TimeTicks::Now(); |
| 185 event_[dir]->Signal(); |
| 186 } |
| 187 } |
| 188 |
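| // Indices for the input and output directions, used with the arrays below.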
| 189 enum { |
| 190 kInput = 0, |
| 191 kOutput = 1 |
| 192 }; |
| 193 |
| 194 int callbacks_[2]; |
| 195 int callback_limit_[2]; |
| 196 int errors_[2]; |
| 197 base::TimeTicks start_time_[2]; |
| 198 base::TimeTicks end_time_[2]; |
| 199 base::WaitableEvent* event_[2]; |
| 200 |
| 201 DISALLOW_COPY_AND_ASSIGN(MockAudioInputOutputCallbacks); |
| 202 }; |
| 203 |
| 204 // Implements AudioOutputStream::AudioSourceCallback and provides audio data |
| 205 // by reading from a data file. |
| 206 class FileAudioSource : public AudioOutputStream::AudioSourceCallback { |
| 207 public: |
| 208 explicit FileAudioSource(base::WaitableEvent* event, const std::string& name) |
| 209 : event_(event), pos_(0), previous_marker_time_(base::TimeTicks::Now()) { |
| 210 // Reads a test file from media/test/data directory and stores it in |
| 211 // a DecoderBuffer. |
| 212 file_ = ReadTestDataFile(name); |
| 213 |
| 214 // Log the name of the file which is used as input for this test. |
| 215 base::FilePath file_path = GetTestDataFilePath(name); |
| 216 LOG(INFO) << "Reading from file: " << file_path.value().c_str(); |
| 217 } |
| 218 |
| 219 virtual ~FileAudioSource() {} |
| 220 |
| 221 // AudioOutputStream::AudioSourceCallback implementation. |
| 222 |
| 223 // Use samples read from a data file and fill up the audio buffer |
| 224 // provided to us in the callback. |
| 225 virtual int OnMoreData(AudioBus* audio_bus, |
| 226 AudioBuffersState buffers_state) OVERRIDE { |
| 227 // Add a '.'-marker once every second. |
| 228 const base::TimeTicks now_time = base::TimeTicks::Now(); |
| 229 const int diff = (now_time - previous_marker_time_).InMilliseconds(); |
| 230 if (diff > 1000) { |
| 231 printf("."); |
| 232 fflush(stdout); |
| 233 previous_marker_time_ = now_time; |
| 234 } |
| 235 |
| 236 bool stop_playing = false; |
| 237 int max_size = |
| 238 audio_bus->frames() * audio_bus->channels() * kBytesPerSample; |
| 239 |
| 240 // Adjust the data size and prepare for the end signal if the file has ended.
| 241 if (pos_ + max_size > file_size()) { |
| 242 stop_playing = true; |
| 243 max_size = file_size() - pos_; |
| 244 } |
| 245 |
| 246 // File data is stored as interleaved 16-bit values. Copy data samples from |
| 247 // the file and deinterleave to match the audio bus format. |
| 248 // FromInterleaved() will zero out any unfilled frames when there is not |
| 249 // sufficient data remaining in the file to fill up the complete frame. |
| 250 int frames = max_size / (audio_bus->channels() * kBytesPerSample); |
| 251 if (max_size) { |
| 252 audio_bus->FromInterleaved(file_->data() + pos_, frames, kBytesPerSample); |
| 253 pos_ += max_size; |
| 254 } |
| 255 |
| 256 // Set event to ensure that the test can stop when the file has ended. |
| 257 if (stop_playing) |
| 258 event_->Signal(); |
| 259 |
| 260 return frames; |
| 261 } |
| 262 |
| 263 virtual int OnMoreIOData(AudioBus* source, |
| 264 AudioBus* dest, |
| 265 AudioBuffersState buffers_state) OVERRIDE { |
| 266 NOTREACHED(); |
| 267 return 0; |
| 268 } |
| 269 |
| 270 virtual void OnError(AudioOutputStream* stream) OVERRIDE {} |
| 271 |
| 272 int file_size() { return file_->data_size(); } |
| 273 |
| 274 private: |
| 275 base::WaitableEvent* event_; |
| 276 int pos_; |
| 277 scoped_refptr<DecoderBuffer> file_; |
| 278 base::TimeTicks previous_marker_time_; |
| 279 |
| 280 DISALLOW_COPY_AND_ASSIGN(FileAudioSource); |
| 281 }; |
| 282 |
| 283 // Implements AudioInputStream::AudioInputCallback and writes the recorded |
| 284 // audio data to a local output file. |
| 285 class FileAudioSink : public AudioInputStream::AudioInputCallback { |
| 286 public: |
| 287 explicit FileAudioSink(base::WaitableEvent* event, |
| 288 const AudioParameters& params, |
| 289 const std::string& file_name) |
| 290 : event_(event), |
| 291 params_(params), |
| 292 previous_marker_time_(base::TimeTicks::Now()) { |
| 293 // Allocate space for ~10 seconds of data. |
| 294 const int kMaxBufferSize = 10 * params.GetBytesPerSecond(); |
| 295 buffer_.reset(new media::SeekableBuffer(0, kMaxBufferSize)); |
| 296 |
| 297 // Open up the binary file which will be written to in the destructor. |
| 298 base::FilePath file_path; |
| 299 EXPECT_TRUE(PathService::Get(base::DIR_SOURCE_ROOT, &file_path)); |
| 300 file_path = file_path.AppendASCII(file_name.c_str()); |
| 301 binary_file_ = file_util::OpenFile(file_path, "wb"); |
| 302 DLOG_IF(ERROR, !binary_file_) << "Failed to open binary PCM data file."; |
| 303 LOG(INFO) << "Writing to file: " << file_path.value().c_str();
| 304 } |
| 305 |
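| // Writes the buffered (recorded) PCM data to the output file and closes it.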
| 306 virtual ~FileAudioSink() { |
| 307 int bytes_written = 0; |
| 308 while (bytes_written < buffer_->forward_capacity()) { |
| 309 const uint8* chunk; |
| 310 int chunk_size; |
| 311 |
| 312 // Stop writing if no more data is available. |
| 313 if (!buffer_->GetCurrentChunk(&chunk, &chunk_size)) |
| 314 break; |
| 315 |
| 316 // Write recorded data chunk to the file and prepare for next chunk. |
| 317 fwrite(chunk, 1, chunk_size, binary_file_); |
| 318 buffer_->Seek(chunk_size); |
| 319 bytes_written += chunk_size; |
| 320 } |
| 321 file_util::CloseFile(binary_file_); |
| 322 } |
| 323 |
| 324 // AudioInputStream::AudioInputCallback implementation. |
| 325 virtual void OnData(AudioInputStream* stream, |
| 326 const uint8* src, |
| 327 uint32 size, |
| 328 uint32 hardware_delay_bytes, |
| 329 double volume) OVERRIDE { |
| 330 // Add a '.'-marker once every second. |
| 331 const base::TimeTicks now_time = base::TimeTicks::Now(); |
| 332 const int diff = (now_time - previous_marker_time_).InMilliseconds(); |
| 333 if (diff > 1000) { |
| 334 printf("."); |
| 335 fflush(stdout); |
| 336 previous_marker_time_ = now_time; |
| 337 } |
| 338 |
| 339 // Store the data in a temporary buffer to avoid making blocking
| 340 // fwrite() calls in the audio callback. The complete buffer will be |
| 341 // written to file in the destructor. |
| 342 if (!buffer_->Append(src, size)) |
| 343 event_->Signal(); |
| 344 } |
| 345 |
| 346 virtual void OnClose(AudioInputStream* stream) OVERRIDE {} |
| 347 virtual void OnError(AudioInputStream* stream) OVERRIDE {} |
| 348 |
| 349 private: |
| 350 base::WaitableEvent* event_; |
| 351 AudioParameters params_; |
| 352 scoped_ptr<media::SeekableBuffer> buffer_; |
| 353 FILE* binary_file_; |
| 354 base::TimeTicks previous_marker_time_; |
| 355 |
| 356 DISALLOW_COPY_AND_ASSIGN(FileAudioSink); |
| 357 }; |
| 358 |
| 359 // Implements AudioInputCallback and AudioSourceCallback to support full |
| 360 // duplex audio where captured samples are played out in loopback after |
| 361 // reading from temporary FIFO storage.
| 362 class FullDuplexAudioSinkSource |
| 363 : public AudioInputStream::AudioInputCallback, |
| 364 public AudioOutputStream::AudioSourceCallback { |
| 365 public: |
| 366 explicit FullDuplexAudioSinkSource(const AudioParameters& params) |
| 367 : params_(params), |
| 368 previous_marker_time_(base::TimeTicks::Now()), |
| 369 started_(false) { |
| 370 // Start with a reasonably small FIFO size. It will be increased |
| 371 // dynamically during the test if required. |
| 372 fifo_.reset(new media::SeekableBuffer(0, 2 * params.GetBytesPerBuffer())); |
| 373 buffer_.reset(new uint8[params_.GetBytesPerBuffer()]); |
| 374 } |
| 375 |
| 376 virtual ~FullDuplexAudioSinkSource() {} |
| 377 |
| 378 // AudioInputStream::AudioInputCallback implementation |
| 379 virtual void OnData(AudioInputStream* stream, |
| 380 const uint8* src, |
| 381 uint32 size, |
| 382 uint32 hardware_delay_bytes, |
| 383 double volume) OVERRIDE { |
| 384 // Add a '.'-marker once every second. |
| 385 const base::TimeTicks now_time = base::TimeTicks::Now(); |
| 386 const int diff = (now_time - previous_marker_time_).InMilliseconds(); |
| 387 |
| 388 base::AutoLock lock(lock_); |
| 389 if (diff > 1000) { |
| 390 started_ = true; |
| 391 previous_marker_time_ = now_time; |
| 392 |
| 393 // Print out the extra delay added by the FIFO. This is a best-effort
| 394 // estimate; we might be +/- 10 ms off here.
| 395 int extra_fifo_delay =
| 396 static_cast<int>(BytesToMilliseconds(fifo_->forward_bytes() + size));
| 397 printf("%d ", extra_fifo_delay);
| 398 fflush(stdout); |
| 399 } |
| 400 |
| 401 // We add an initial delay of ~1 second before loopback starts to ensure |
| 402 // a stable callback sequence and to avoid initial bursts which might add |
| 403 // to the extra FIFO delay. |
| 404 if (!started_) |
| 405 return; |
| 406 |
| 407 // Append new data to the FIFO and extend its size if the max capacity
| 408 // was exceeded. Flush the FIFO whenever it is extended, just in case.
| 409 if (!fifo_->Append(src, size)) { |
| 410 fifo_->set_forward_capacity(2 * fifo_->forward_capacity()); |
| 411 printf("+ "); |
| 412 fflush(stdout); |
| 413 fifo_->Clear(); |
| 414 } |
| 415 } |
| 416 |
| 417 virtual void OnClose(AudioInputStream* stream) OVERRIDE {} |
| 418 virtual void OnError(AudioInputStream* stream) OVERRIDE {} |
| 419 |
| 420 // AudioOutputStream::AudioSourceCallback implementation |
| 421 virtual int OnMoreData(AudioBus* dest, |
| 422 AudioBuffersState buffers_state) OVERRIDE { |
| 423 const int size_in_bytes = |
| 424 (params_.bits_per_sample() / 8) * dest->frames() * dest->channels(); |
| 425 EXPECT_EQ(size_in_bytes, params_.GetBytesPerBuffer()); |
| 426 |
| 427 base::AutoLock lock(lock_); |
| 428 |
| 429 // We add an initial delay of ~1 second before loopback starts to ensure |
| 430 // a stable callback sequence and to avoid initial bursts which might add
| 431 // to the extra FIFO delay. |
| 432 if (!started_) { |
| 433 dest->Zero(); |
| 434 return dest->frames(); |
| 435 } |
| 436 |
| 437 // Fill up the destination with zeros if the FIFO does not contain enough
| 438 // data to fulfill the request. |
| 439 if (fifo_->forward_bytes() < size_in_bytes) { |
| 440 dest->Zero(); |
| 441 } else { |
| 442 fifo_->Read(buffer_.get(), size_in_bytes); |
| 443 dest->FromInterleaved( |
| 444 buffer_.get(), dest->frames(), params_.bits_per_sample() / 8); |
| 445 } |
| 446 |
| 447 return dest->frames(); |
| 448 } |
| 449 |
| 450 virtual int OnMoreIOData(AudioBus* source, |
| 451 AudioBus* dest, |
| 452 AudioBuffersState buffers_state) OVERRIDE { |
| 453 NOTREACHED(); |
| 454 return 0; |
| 455 } |
| 456 |
| 457 virtual void OnError(AudioOutputStream* stream) OVERRIDE {} |
| 458 |
| 459 private: |
| 460 // Converts a number of bytes to the corresponding time in milliseconds,
| 461 // based on the existing audio parameters.
| 462 double BytesToMilliseconds(int bytes) const { |
| 463 const int frames = bytes / params_.GetBytesPerFrame(); |
| 464 return (base::TimeDelta::FromMicroseconds( |
| 465 frames * base::Time::kMicrosecondsPerSecond / |
| 466 static_cast<float>(params_.sample_rate()))).InMillisecondsF(); |
| 467 } |
| 468 |
| 469 AudioParameters params_; |
| 470 base::TimeTicks previous_marker_time_; |
| 471 base::Lock lock_; |
| 472 scoped_ptr<media::SeekableBuffer> fifo_; |
| 473 scoped_ptr<uint8[]> buffer_; |
| 474 bool started_; |
| 475 |
| 476 DISALLOW_COPY_AND_ASSIGN(FullDuplexAudioSinkSource); |
| 477 }; |
| 478 |
| 479 // Test fixture class. |
| 480 class AudioAndroidTest : public testing::Test { |
| 481 public: |
| 482 AudioAndroidTest() : audio_manager_(AudioManager::Create()) {} |
| 483 |
| 484 virtual ~AudioAndroidTest() {} |
| 485 |
| 486 AudioManager* audio_manager() { return audio_manager_.get(); } |
| 487 |
| 488 AudioParameters GetDefaultInputStreamParameters() { |
| 489 return audio_manager() |
| 490 ->GetInputStreamParameters(AudioManagerBase::kDefaultDeviceId); |
| 491 } |
| 492 |
| 493 AudioParameters GetDefaultOutputStreamParameters() { |
| 494 return audio_manager()->GetDefaultOutputStreamParameters(); |
| 495 } |
| 496 |
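| // Helper macro which opens and starts |stream|, waits until |num_callbacks|
| // callbacks have fired in the given direction, stops and closes the stream,
| // and finally verifies the callback count, the error count and the average
| // time between callbacks. Assumes that |num_callbacks| and
| // |time_between_callbacks_ms| are defined in the calling scope.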
| 497 #define START_STREAM_AND_WAIT_FOR_EVENT(stream, dir) \ |
| 498 base::WaitableEvent event(false, false); \ |
| 499 io_callbacks_.set_##dir##_callback_limit(&event, num_callbacks); \ |
| 500 EXPECT_TRUE(stream->Open()); \ |
| 501 stream->Start(&io_callbacks_); \ |
| 502 EXPECT_TRUE(event.TimedWait(TestTimeouts::action_timeout())); \ |
| 503 stream->Stop(); \ |
| 504 stream->Close(); \ |
| 505 EXPECT_GE(io_callbacks_.dir##_callbacks(), num_callbacks); \ |
| 506 EXPECT_LE(io_callbacks_.dir##_callbacks(), num_callbacks + 2); \ |
| 507 EXPECT_EQ(io_callbacks_.dir##_errors(), 0); \ |
| 508 LOG(INFO) << "expected time between callbacks: " \ |
| 509 << time_between_callbacks_ms << " ms"; \ |
| 510 double actual_time_between_callbacks_ms = \ |
| 511 io_callbacks_.average_time_between_##dir##_callbacks_ms(); \ |
| 512 LOG(INFO) << "actual time between callbacks: " \ |
| 513 << actual_time_between_callbacks_ms << " ms"; \ |
| 514 EXPECT_GE(actual_time_between_callbacks_ms, \ |
| 515 0.70 * time_between_callbacks_ms); \ |
| 516 EXPECT_LE(actual_time_between_callbacks_ms, 1.30 * time_between_callbacks_ms) |
| 517 |
| 518 void StartInputStreamCallbacks(const AudioParameters& params) { |
| 519 double time_between_callbacks_ms = ExpectedTimeBetweenCallbacks(params); |
| 520 const int num_callbacks = (2000.0 / time_between_callbacks_ms); |
| 521 AudioInputStream* ais = audio_manager()->MakeAudioInputStream( |
| 522 params, AudioManagerBase::kDefaultDeviceId); |
| 523 EXPECT_TRUE(ais); |
| 524 START_STREAM_AND_WAIT_FOR_EVENT(ais, input); |
| 525 } |
| 526 |
| 527 void StartOutputStreamCallbacks(const AudioParameters& params) { |
| 528 double time_between_callbacks_ms = ExpectedTimeBetweenCallbacks(params); |
| 529 const int num_callbacks = (2000.0 / time_between_callbacks_ms); |
| 530 AudioOutputStream* aos = audio_manager()->MakeAudioOutputStream( |
| 531 params, std::string(), std::string()); |
| 532 EXPECT_TRUE(aos); |
| 533 START_STREAM_AND_WAIT_FOR_EVENT(aos, output); |
| 534 } |
| 535 |
| 536 #undef START_STREAM_AND_WAIT_FOR_EVENT |
| 537 |
| 538 protected: |
| 539 base::MessageLoopForUI message_loop_; |
| 540 scoped_ptr<AudioManager> audio_manager_; |
| 541 MockAudioInputOutputCallbacks io_callbacks_; |
| 542 |
| 543 DISALLOW_COPY_AND_ASSIGN(AudioAndroidTest); |
| 544 }; |
| 545 |
| 546 // Get the default audio input parameters and log the result. |
| 547 TEST_F(AudioAndroidTest, GetInputStreamParameters) { |
| 548 AudioParameters params = GetDefaultInputStreamParameters(); |
| 549 EXPECT_TRUE(params.IsValid()); |
| 550 LOG(INFO) << params; |
| 551 } |
| 552 |
| 553 // Get the default audio output parameters and log the result. |
| 554 TEST_F(AudioAndroidTest, GetDefaultOutputStreamParameters) { |
| 555 AudioParameters params = GetDefaultOutputStreamParameters(); |
| 556 EXPECT_TRUE(params.IsValid()); |
| 557 LOG(INFO) << params; |
| 558 } |
| 559 |
| 560 // Check if low-latency output is supported and log the result.
| 561 TEST_F(AudioAndroidTest, IsAudioLowLatencySupported) { |
| 562 AudioManagerAndroid* manager = |
| 563 static_cast<AudioManagerAndroid*>(audio_manager()); |
| 564 bool low_latency = manager->IsAudioLowLatencySupported(); |
| 565 low_latency ? LOG(INFO) << "Low latency output is supported" |
| 566 : LOG(INFO) << "Low latency output is *not* supported"; |
| 567 } |
| 568 |
| 569 // Ensure that a default input stream can be created and closed. |
| 570 TEST_F(AudioAndroidTest, CreateAndCloseInputStream) { |
| 571 AudioParameters params = GetDefaultInputStreamParameters(); |
| 572 AudioInputStream* ais = audio_manager()->MakeAudioInputStream( |
| 573 params, AudioManagerBase::kDefaultDeviceId); |
| 574 EXPECT_TRUE(ais); |
| 575 ais->Close(); |
| 576 } |
| 577 |
| 578 // Ensure that a default output stream can be created and closed. |
| 579 // TODO(henrika): should we also verify that this API changes the audio mode |
| 580 // to communication mode, and calls RegisterHeadsetReceiver, the first time |
| 581 // it is called? |
| 582 TEST_F(AudioAndroidTest, CreateAndCloseOutputStream) { |
| 583 AudioParameters params = GetDefaultOutputStreamParameters(); |
| 584 AudioOutputStream* aos = audio_manager()->MakeAudioOutputStream( |
| 585 params, std::string(), std::string()); |
| 586 EXPECT_TRUE(aos); |
| 587 aos->Close(); |
| 588 } |
| 589 |
| 590 // Ensure that a default input stream can be opened and closed. |
| 591 TEST_F(AudioAndroidTest, OpenAndCloseInputStream) { |
| 592 AudioParameters params = GetDefaultInputStreamParameters(); |
| 593 AudioInputStream* ais = audio_manager()->MakeAudioInputStream( |
| 594 params, AudioManagerBase::kDefaultDeviceId); |
| 595 EXPECT_TRUE(ais); |
| 596 EXPECT_TRUE(ais->Open()); |
| 597 ais->Close(); |
| 598 } |
| 599 |
| 600 // Ensure that a default output stream can be opened and closed. |
| 601 TEST_F(AudioAndroidTest, OpenAndCloseOutputStream) { |
| 602 AudioParameters params = GetDefaultOutputStreamParameters(); |
| 603 AudioOutputStream* aos = audio_manager()->MakeAudioOutputStream( |
| 604 params, std::string(), std::string()); |
| 605 EXPECT_TRUE(aos); |
| 606 EXPECT_TRUE(aos->Open()); |
| 607 aos->Close(); |
| 608 } |
| 609 |
| 610 // Start input streaming using default input parameters and ensure that the |
| 611 // callback sequence is sane. |
| 612 TEST_F(AudioAndroidTest, StartInputStreamCallbacks) { |
| 613 AudioParameters params = GetDefaultInputStreamParameters(); |
| 614 StartInputStreamCallbacks(params); |
| 615 } |
| 616 |
| 617 // Start input streaming using non-default input parameters and ensure that the
| 618 // callback sequence is sane. The only change we make in this test is to select |
| 619 // a 10ms buffer size instead of the default size. |
| 620 // TODO(henrika): possibly add support for more variations. |
| 621 TEST_F(AudioAndroidTest, StartInputStreamCallbacksNonDefaultParameters) { |
| 622 AudioParameters native_params = GetDefaultInputStreamParameters(); |
| 623 AudioParameters params(native_params.format(), |
| 624 native_params.channel_layout(), |
| 625 native_params.sample_rate(), |
| 626 native_params.bits_per_sample(), |
| 627 native_params.sample_rate() / 100); |
| 628 StartInputStreamCallbacks(params); |
| 629 } |
| 630 |
| 631 // Start output streaming using default output parameters and ensure that the |
| 632 // callback sequence is sane. |
| 633 TEST_F(AudioAndroidTest, StartOutputStreamCallbacks) { |
| 634 AudioParameters params = GetDefaultOutputStreamParameters(); |
| 635 StartOutputStreamCallbacks(params); |
| 636 } |
| 637 |
| 638 // Start output streaming using non-default output parameters and ensure that
| 639 // the callback sequence is sane. The only changes we make in this test are to
| 640 // select a 10ms buffer size instead of the default size and to open up the |
| 641 // device in mono. |
| 642 // TODO(henrika): possibly add support for more variations. |
| 643 TEST_F(AudioAndroidTest, StartOutputStreamCallbacksNonDefaultParameters) { |
| 644 AudioParameters native_params = GetDefaultOutputStreamParameters(); |
| 645 AudioParameters params(native_params.format(), |
| 646 CHANNEL_LAYOUT_MONO, |
| 647 native_params.sample_rate(), |
| 648 native_params.bits_per_sample(), |
| 649 native_params.sample_rate() / 100); |
| 650 StartOutputStreamCallbacks(params); |
| 651 } |
| 652 |
| 653 // Play out a PCM file segment in real time and allow the user to verify that |
| 654 // the rendered audio sounds OK. |
| 655 // NOTE: this test requires user interaction and is not designed to run as an |
| 656 // automated test on bots.
| 657 TEST_F(AudioAndroidTest, DISABLED_RunOutputStreamWithFileAsSource) { |
| 658 AudioParameters params = GetDefaultOutputStreamParameters(); |
| 659 AudioOutputStream* aos = audio_manager()->MakeAudioOutputStream( |
| 660 params, std::string(), std::string()); |
| 661 EXPECT_TRUE(aos); |
| 662 |
| 663 // PrintAudioParameters(params); |
| 664 // fflush(stdout); |
| 665 |
| 666 std::string file_name; |
| 667 if (params.sample_rate() == 48000 && params.channels() == 2) { |
| 668 file_name = kSpeechFile_16b_s_48k; |
| 669 } else if (params.sample_rate() == 48000 && params.channels() == 1) { |
| 670 file_name = kSpeechFile_16b_m_48k; |
| 671 } else if (params.sample_rate() == 44100 && params.channels() == 2) { |
| 672 file_name = kSpeechFile_16b_s_44k; |
| 673 } else if (params.sample_rate() == 44100 && params.channels() == 1) { |
| 674 file_name = kSpeechFile_16b_m_44k; |
| 675 } else { |
| 676 FAIL() << "This test supports 44.1kHz and 48kHz mono/stereo only."; |
| 677 return; |
| 678 } |
| 679 |
| 680 base::WaitableEvent event(false, false); |
| 681 FileAudioSource source(&event, file_name); |
| 682 |
| 683 EXPECT_TRUE(aos->Open()); |
| 684 aos->SetVolume(1.0); |
| 685 aos->Start(&source); |
| 686 printf(">> Verify that file is played out correctly"); |
| 687 fflush(stdout); |
| 688 EXPECT_TRUE(event.TimedWait(TestTimeouts::action_max_timeout())); |
| 689 printf("\n"); |
| 690 aos->Stop(); |
| 691 aos->Close(); |
| 692 } |
| 693 |
| 694 // Start input streaming and run it for ten seconds while recording to a |
| 695 // local audio file. |
| 696 // NOTE: this test requires user interaction and is not designed to run as an |
| 697 // automated test on bots.
| 698 TEST_F(AudioAndroidTest, DISABLED_RunSimplexInputStreamWithFileAsSink) { |
| 699 AudioParameters params = GetDefaultInputStreamParameters(); |
| 700 AudioInputStream* ais = audio_manager()->MakeAudioInputStream( |
| 701 params, AudioManagerBase::kDefaultDeviceId); |
| 702 EXPECT_TRUE(ais); |
| 703 |
| 704 // PrintAudioParameters(params); |
| 705 // fflush(stdout); |
| 706 |
| 707 std::string file_name = base::StringPrintf("out_simplex_%d_%d_%d.pcm", |
| 708 params.sample_rate(), |
| 709 params.frames_per_buffer(), |
| 710 params.channels()); |
| 711 |
| 712 base::WaitableEvent event(false, false); |
| 713 FileAudioSink sink(&event, params, file_name); |
| 714 |
| 715 EXPECT_TRUE(ais->Open()); |
| 716 ais->Start(&sink); |
| 717 printf(">> Speak into the microphone to record audio"); |
| 718 fflush(stdout); |
| 719 EXPECT_TRUE(event.TimedWait(TestTimeouts::action_max_timeout())); |
| 720 printf("\n"); |
| 721 ais->Stop(); |
| 722 ais->Close(); |
| 723 } |
| 724 |
| 725 // Same test as RunSimplexInputStreamWithFileAsSink but this time output |
| 726 // streaming is active as well (it only plays out zeros).
| 727 // NOTE: this test requires user interaction and is not designed to run as an
| 728 // automated test on bots.
| 729 TEST_F(AudioAndroidTest, DISABLED_RunDuplexInputStreamWithFileAsSink) { |
| 730 AudioParameters in_params = GetDefaultInputStreamParameters(); |
| 731 AudioInputStream* ais = audio_manager()->MakeAudioInputStream( |
| 732 in_params, AudioManagerBase::kDefaultDeviceId); |
| 733 EXPECT_TRUE(ais); |
| 734 |
| 735 // PrintAudioParameters(in_params); |
| 736 // fflush(stdout); |
| 737 |
| 738 AudioParameters out_params = |
| 739 audio_manager()->GetDefaultOutputStreamParameters(); |
| 740 AudioOutputStream* aos = audio_manager()->MakeAudioOutputStream( |
| 741 out_params, std::string(), std::string()); |
| 742 EXPECT_TRUE(aos); |
| 743 |
| 744 // PrintAudioParameters(out_params); |
| 745 // fflush(stdout); |
| 746 |
| 747 std::string file_name = base::StringPrintf("out_duplex_%d_%d_%d.pcm", |
| 748 in_params.sample_rate(), |
| 749 in_params.frames_per_buffer(), |
| 750 in_params.channels()); |
| 751 |
| 752 base::WaitableEvent event(false, false); |
| 753 FileAudioSink sink(&event, in_params, file_name); |
| 754 |
| 755 EXPECT_TRUE(ais->Open()); |
| 756 EXPECT_TRUE(aos->Open()); |
| 757 ais->Start(&sink); |
| 758 aos->Start(&io_callbacks_); |
| 759 printf(">> Speak into the microphone to record audio"); |
| 760 fflush(stdout); |
| 761 EXPECT_TRUE(event.TimedWait(TestTimeouts::action_max_timeout())); |
| 762 printf("\n"); |
| 763 aos->Stop(); |
| 764 ais->Stop(); |
| 765 aos->Close(); |
| 766 ais->Close(); |
| 767 } |
| 768 |
| 769 // Start audio in both directions while feeding captured data into a FIFO so |
| 770 // it can be read directly (in loopback) by the render side. A small extra |
| 771 // delay will be added by the FIFO and an estimate of this delay will be |
| 772 // printed out during the test. |
| 773 // NOTE: this test requires user interaction and is not designed to run as an |
| 774 // automated test on bots.
| 775 TEST_F(AudioAndroidTest, |
| 776 DISABLED_RunSymmetricInputAndOutputStreamsInFullDuplex) { |
| 777 // Get native audio parameters for the input side. |
| 778 AudioParameters default_input_params = GetDefaultInputStreamParameters(); |
| 779 |
| 780 // Modify the parameters so that both input and output can use the same |
| 781 // parameters by selecting a 10ms buffer size. This will also ensure that
| 782 // the output stream will be a mono stream, since mono is the default for
| 783 // input audio on Android.
| 784 AudioParameters io_params(default_input_params.format(), |
| 785 default_input_params.channel_layout(), |
| 786 default_input_params.sample_rate(), |
| 787 default_input_params.bits_per_sample(), |
| 788 default_input_params.sample_rate() / 100); |
| 789 // PrintAudioParameters(io_params); |
| 790 // fflush(stdout); |
| 791 |
| 792 // Create input and output streams using the common audio parameters. |
| 793 AudioInputStream* ais = audio_manager()->MakeAudioInputStream( |
| 794 io_params, AudioManagerBase::kDefaultDeviceId); |
| 795 EXPECT_TRUE(ais); |
| 796 AudioOutputStream* aos = audio_manager()->MakeAudioOutputStream( |
| 797 io_params, std::string(), std::string()); |
| 798 EXPECT_TRUE(aos); |
| 799 |
| 800 FullDuplexAudioSinkSource full_duplex(io_params); |
| 801 |
| 802 // Start a full duplex audio session and print out estimates of the extra |
| 803 // delay we should expect from the FIFO. If real-time delay measurements are |
| 804 // performed, the result should be reduced by this extra delay since it is |
| 805 // something that has been added by the test. |
| 806 EXPECT_TRUE(ais->Open()); |
| 807 EXPECT_TRUE(aos->Open()); |
| 808 ais->Start(&full_duplex); |
| 809 aos->Start(&full_duplex); |
| 810 printf( |
| 811 "HINT: an estimate of the extra FIFO delay will be updated once per " |
| 812 "second during this test.\n"); |
| 813 printf(">> Speak into the mic and listen to the audio in loopback...\n"); |
| 814 fflush(stdout); |
| 815 base::PlatformThread::Sleep(base::TimeDelta::FromSeconds(20)); |
| 816 printf("\n"); |
| 817 aos->Stop(); |
| 818 ais->Stop(); |
| 819 aos->Close(); |
| 820 ais->Close(); |
| 821 } |
| 822 |
| 823 } // namespace media |