// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/basictypes.h"
#include "base/file_util.h"
#include "base/memory/scoped_ptr.h"
#include "base/message_loop/message_loop.h"
#include "base/path_service.h"
#include "base/strings/stringprintf.h"
#include "base/synchronization/lock.h"
#include "base/synchronization/waitable_event.h"
#include "base/test/test_timeouts.h"
#include "base/threading/platform_thread.h"
#include "base/time/time.h"
#include "build/build_config.h"
#include "media/audio/android/audio_manager_android.h"
#include "media/audio/audio_io.h"
#include "media/audio/audio_manager_base.h"
#include "media/base/decoder_buffer.h"
#include "media/base/seekable_buffer.h"
#include "media/base/test_data_util.h"
#include "testing/gtest/include/gtest/gtest.h"

namespace media {

static const char kSpeechFile_16b_s_48k[] = "speech_16b_stereo_48kHz.raw";
static const char kSpeechFile_16b_m_48k[] = "speech_16b_mono_48kHz.raw";
static const char kSpeechFile_16b_s_44k[] = "speech_16b_stereo_44kHz.raw";
static const char kSpeechFile_16b_m_44k[] = "speech_16b_mono_44kHz.raw";

static const int kBitsPerSample = 16;

// Counts input and output callbacks, keeps track of errors and signals a
// waitable event when a preset number of callbacks has been reached. Used to
// verify that the callback sequence is sane.
class MockAudioInputOutputCallbacks
    : public AudioInputStream::AudioInputCallback,
      public AudioOutputStream::AudioSourceCallback {
 public:
  MockAudioInputOutputCallbacks()
      : input_callbacks_(0),
        output_callbacks_(0),
        input_callback_limit_(-1),
        output_callback_limit_(-1),
        input_errors_(0),
        output_errors_(0) {}
  virtual ~MockAudioInputOutputCallbacks() {}

  // Implementation of AudioInputCallback.
  virtual void OnData(AudioInputStream* stream, const uint8* src,
                      uint32 size, uint32 hardware_delay_bytes,
                      double volume) OVERRIDE {
    // int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
    // DVLOG(1) << "##" << thread_id;

    if (input_callbacks_ == 0)
      input_start_time_ = base::TimeTicks::Now();

    input_callbacks_++;

    if (input_callback_limit_ > 0 &&
        input_callbacks_ == input_callback_limit_) {
      input_end_time_ = base::TimeTicks::Now();
      input_event_->Signal();
    }
  }
  virtual void OnClose(AudioInputStream* stream) OVERRIDE {}
  virtual void OnError(AudioInputStream* stream) OVERRIDE {
    input_errors_++;
  }

  // Implementation of AudioOutputStream::AudioSourceCallback.
  virtual int OnMoreData(AudioBus* dest,
                         AudioBuffersState buffers_state) OVERRIDE {
    // DVLOG(1) << "--- OnMoreData ---";
    if (output_callbacks_ == 0)
      output_start_time_ = base::TimeTicks::Now();

    output_callbacks_++;

    if (output_callback_limit_ > 0 &&
        output_callbacks_ == output_callback_limit_) {
      output_end_time_ = base::TimeTicks::Now();
      output_event_->Signal();
    }

    dest->Zero();
    return dest->frames();
  }

  virtual int OnMoreIOData(AudioBus* source,
                           AudioBus* dest,
                           AudioBuffersState buffers_state) OVERRIDE {
    NOTREACHED();
    return 0;
  }

  virtual void OnError(AudioOutputStream* stream) OVERRIDE {
    output_errors_++;
  }

  int input_callbacks() { return input_callbacks_; }
  void set_input_callback_limit(base::WaitableEvent* event,
                                int input_callback_limit) {
    input_event_ = event;
    input_callback_limit_ = input_callback_limit;
  }
  int input_errors() { return input_errors_; }
  base::TimeTicks input_start_time() { return input_start_time_; }
  base::TimeTicks input_end_time() { return input_end_time_; }

  int output_callbacks() { return output_callbacks_; }
  void set_output_callback_limit(base::WaitableEvent* event,
                                 int output_callback_limit) {
    output_event_ = event;
    output_callback_limit_ = output_callback_limit;
  }
  int output_errors() { return output_errors_; }
  base::TimeTicks output_start_time() { return output_start_time_; }
  base::TimeTicks output_end_time() { return output_end_time_; }

 private:
  int input_callbacks_;
  int output_callbacks_;
  int input_callback_limit_;
  int output_callback_limit_;
  int input_errors_;
  int output_errors_;
  base::TimeTicks input_start_time_;
  base::TimeTicks output_start_time_;
  base::TimeTicks input_end_time_;
  base::TimeTicks output_end_time_;
  base::WaitableEvent* input_event_;
  base::WaitableEvent* output_event_;

  DISALLOW_COPY_AND_ASSIGN(MockAudioInputOutputCallbacks);
};

// Implements AudioOutputStream::AudioSourceCallback and provides audio data
// by reading from a data file.
class FileAudioSource : public AudioOutputStream::AudioSourceCallback {
 public:
  explicit FileAudioSource(base::WaitableEvent* event, const std::string& name)
      : event_(event),
        pos_(0),
        previous_marker_time_(base::TimeTicks::Now()) {
    // Reads a test file from media/test/data directory and stores it in
    // a DecoderBuffer.
    file_ = ReadTestDataFile(name);

    // Log the name of the file which is used as input for this test.
    base::FilePath file_path = GetTestDataFilePath(name);
    printf("Reading from file: %s\n", file_path.value().c_str());
    fflush(stdout);
  }

  virtual ~FileAudioSource() {}

  // AudioOutputStream::AudioSourceCallback implementation.

  // Use samples read from a data file and fill up the audio buffer
  // provided to us in the callback.
  virtual int OnMoreData(AudioBus* audio_bus,
                         AudioBuffersState buffers_state) OVERRIDE {
    // Add a '.'-marker once every second.
    const base::TimeTicks now_time = base::TimeTicks::Now();
    const int diff = (now_time - previous_marker_time_).InMilliseconds();
    if (diff > 1000) {
      printf(".");
      fflush(stdout);
      previous_marker_time_ = now_time;
    }

    int max_size =
        audio_bus->frames() * audio_bus->channels() * kBitsPerSample / 8;

    bool stop_playing = false;

    // Adjust data size and prepare for end signal if file has ended.
    if (pos_ + static_cast<int>(max_size) > file_size()) {
      stop_playing = true;
      max_size = file_size() - pos_;
    }

    // File data is stored as interleaved 16-bit values. Copy data samples from
    // the file and deinterleave to match the audio bus format.
    // FromInterleaved() will zero out any unfilled frames when there is not
    // sufficient data remaining in the file to fill up the complete frame.
    int frames = max_size / (audio_bus->channels() * kBitsPerSample / 8);
    if (max_size) {
      audio_bus->FromInterleaved(
          file_->data() + pos_, frames, kBitsPerSample / 8);
      pos_ += max_size;
    }

    // Set event to ensure that the test can stop when the file has ended.
    if (stop_playing)
      event_->Signal();

    return frames;
  }

  virtual int OnMoreIOData(AudioBus* source,
                           AudioBus* dest,
                           AudioBuffersState buffers_state) OVERRIDE {
    NOTREACHED();
    return 0;
  }

  virtual void OnError(AudioOutputStream* stream) OVERRIDE {}

  int file_size() { return file_->data_size(); }

 private:
  base::WaitableEvent* event_;
  int pos_;
  scoped_refptr<DecoderBuffer> file_;
  base::TimeTicks previous_marker_time_;

  DISALLOW_COPY_AND_ASSIGN(FileAudioSource);
};

// Implements AudioInputStream::AudioInputCallback and writes the recorded
// audio data to a local output file.
class FileAudioSink : public AudioInputStream::AudioInputCallback {
 public:
  explicit FileAudioSink(base::WaitableEvent* event,
                         const AudioParameters& params,
                         const std::string& file_name)
      : event_(event),
        params_(params),
        previous_marker_time_(base::TimeTicks::Now()) {
    // Allocate space for ~10 seconds of data.
    const int kMaxBufferSize = 10 * params.GetBytesPerSecond();
    buffer_.reset(new media::SeekableBuffer(0, kMaxBufferSize));

    // Open up the binary file which will be written to in the destructor.
    base::FilePath file_path;
    EXPECT_TRUE(PathService::Get(base::DIR_SOURCE_ROOT, &file_path));
    file_path = file_path.AppendASCII(file_name.c_str());
    binary_file_ = file_util::OpenFile(file_path, "wb");
    DLOG_IF(ERROR, !binary_file_) << "Failed to open binary PCM data file.";
    printf("Writing to file : %s ", file_path.value().c_str());
    printf("of size %d bytes\n", buffer_->forward_capacity());
    fflush(stdout);
  }

  virtual ~FileAudioSink() {
    int bytes_written = 0;
    while (bytes_written < buffer_->forward_capacity()) {
      const uint8* chunk;
      int chunk_size;

      // Stop writing if no more data is available.
      if (!buffer_->GetCurrentChunk(&chunk, &chunk_size))
        break;

      // Write recorded data chunk to the file and prepare for next chunk.
      fwrite(chunk, 1, chunk_size, binary_file_);
      buffer_->Seek(chunk_size);
      bytes_written += chunk_size;
    }
    file_util::CloseFile(binary_file_);
  }

  // AudioInputStream::AudioInputCallback implementation.
  virtual void OnData(AudioInputStream* stream,
                      const uint8* src,
                      uint32 size,
                      uint32 hardware_delay_bytes,
                      double volume) OVERRIDE {
    // Add a '.'-marker once every second.
    const base::TimeTicks now_time = base::TimeTicks::Now();
    const int diff = (now_time - previous_marker_time_).InMilliseconds();
    if (diff > 1000) {
      printf(".");
      fflush(stdout);
      previous_marker_time_ = now_time;
    }

    // Store the data in a temporary buffer to avoid making blocking
    // fwrite() calls in the audio callback. The complete buffer will be
    // written to file in the destructor.
    if (!buffer_->Append(src, size))
      event_->Signal();
  }

  virtual void OnClose(AudioInputStream* stream) OVERRIDE {}
  virtual void OnError(AudioInputStream* stream) OVERRIDE {}

 private:
  base::WaitableEvent* event_;
  AudioParameters params_;
  scoped_ptr<media::SeekableBuffer> buffer_;
  FILE* binary_file_;
  base::TimeTicks previous_marker_time_;

  DISALLOW_COPY_AND_ASSIGN(FileAudioSink);
};

// Implements AudioInputCallback and AudioSourceCallback to support full
// duplex audio where captured samples are played out in loopback after
// reading from a temporary FIFO storage.
class FullDuplexAudioSinkSource
    : public AudioInputStream::AudioInputCallback,
      public AudioOutputStream::AudioSourceCallback {
 public:
  explicit FullDuplexAudioSinkSource(const AudioParameters& params)
      : params_(params),
        previous_marker_time_(base::TimeTicks::Now()),
        started_(false) {
    // Start with a reasonably small FIFO size. It will be increased
    // dynamically during the test if required.
    fifo_.reset(
        new media::SeekableBuffer(0, 2 * params.GetBytesPerBuffer()));
    buffer_.reset(new uint8[params_.GetBytesPerBuffer()]);
  }

  virtual ~FullDuplexAudioSinkSource() {}

  // AudioInputStream::AudioInputCallback implementation.
  virtual void OnData(AudioInputStream* stream, const uint8* src,
                      uint32 size, uint32 hardware_delay_bytes,
                      double volume) OVERRIDE {
    // Check if roughly one second has passed since the last delay printout.
    const base::TimeTicks now_time = base::TimeTicks::Now();
    const int diff = (now_time - previous_marker_time_).InMilliseconds();

    base::AutoLock lock(lock_);
    if (diff > 1000) {
      started_ = true;
      previous_marker_time_ = now_time;

      // Print out the extra delay added by the FIFO. This is a best-effort
      // estimate. We might be +/- 10 ms off here.
      int extra_fifo_delay = static_cast<int>(
          BytesToMilliseconds(fifo_->forward_bytes() + size));
      printf("%d ", extra_fifo_delay);
      fflush(stdout);
    }

    // We add an initial delay of ~1 second before loopback starts to ensure
    // a stable callback sequence and to avoid initial bursts which might add
    // to the extra FIFO delay.
    if (!started_)
      return;

    if (!fifo_->Append(src, size)) {
      fifo_->set_forward_capacity(2 * fifo_->forward_capacity());
    }
  }

  virtual void OnClose(AudioInputStream* stream) OVERRIDE {}
  virtual void OnError(AudioInputStream* stream) OVERRIDE {}

  // AudioOutputStream::AudioSourceCallback implementation.
  virtual int OnMoreData(AudioBus* dest,
                         AudioBuffersState buffers_state) OVERRIDE {
    const int size_in_bytes =
        (kBitsPerSample / 8) * dest->frames() * dest->channels();
    EXPECT_EQ(size_in_bytes, params_.GetBytesPerBuffer());

    base::AutoLock lock(lock_);

    // We add an initial delay of ~1 second before loopback starts to ensure
    // a stable callback sequence and to avoid initial bursts which might add
    // to the extra FIFO delay.
    if (!started_) {
      dest->Zero();
      return dest->frames();
    }

    // Fill up the destination with zeros if the FIFO does not contain enough
    // data to fulfill the request.
    if (fifo_->forward_bytes() < size_in_bytes) {
      dest->Zero();
    } else {
      fifo_->Read(buffer_.get(), size_in_bytes);
      dest->FromInterleaved(
          buffer_.get(), dest->frames(), kBitsPerSample / 8);
    }

    return dest->frames();
  }

  virtual int OnMoreIOData(AudioBus* source,
                           AudioBus* dest,
                           AudioBuffersState buffers_state) OVERRIDE {
    NOTREACHED();
    return 0;
  }

  virtual void OnError(AudioOutputStream* stream) OVERRIDE {}

 private:
  // Converts a size in bytes to a time in milliseconds given the existing
  // audio parameters.
  double BytesToMilliseconds(int bytes) const {
    const int frames = bytes / params_.GetBytesPerFrame();
    return (base::TimeDelta::FromMicroseconds(
        frames * base::Time::kMicrosecondsPerSecond /
        static_cast<float>(params_.sample_rate()))).InMillisecondsF();
  }

  AudioParameters params_;
  base::TimeTicks previous_marker_time_;
  base::Lock lock_;
  scoped_ptr<media::SeekableBuffer> fifo_;
  scoped_ptr<uint8[]> buffer_;
  bool started_;

  DISALLOW_COPY_AND_ASSIGN(FullDuplexAudioSinkSource);
};

// Test fixture class.
class AudioAndroidTest : public testing::Test {
 public:
  AudioAndroidTest()
      : audio_manager_(AudioManager::Create()) {}

  virtual ~AudioAndroidTest() {}

  AudioManager* audio_manager() { return audio_manager_.get(); }

  // Convenience method which verifies that at least one valid input and one
  // valid output device can be found. This also keeps the tests from running
  // on build bots that lack audio devices.
  bool CanRunAudioTests() {
    bool input = audio_manager()->HasAudioInputDevices();
    bool output = audio_manager()->HasAudioOutputDevices();
    LOG_IF(WARNING, !input) << "No input device detected.";
    LOG_IF(WARNING, !output) << "No output device detected.";
    return input && output;
  }

  // Converts AudioParameters::Format enumerator to readable string.
  std::string FormatToString(AudioParameters::Format format) {
    if (format == AudioParameters::AUDIO_PCM_LINEAR)
      return std::string("AUDIO_PCM_LINEAR");
    else if (format == AudioParameters::AUDIO_PCM_LOW_LATENCY)
      return std::string("AUDIO_PCM_LOW_LATENCY");
    else if (format == AudioParameters::AUDIO_FAKE)
      return std::string("AUDIO_FAKE");
    else if (format == AudioParameters::AUDIO_LAST_FORMAT)
      return std::string("AUDIO_LAST_FORMAT");
    else
      return std::string();
  }

  // Converts ChannelLayout enumerator to readable string. Does not include
  // multi-channel cases since these layouts are not supported on Android.
  std::string ChannelLayoutToString(ChannelLayout channel_layout) {
    if (channel_layout == CHANNEL_LAYOUT_NONE)
      return std::string("CHANNEL_LAYOUT_NONE");
    else if (channel_layout == CHANNEL_LAYOUT_UNSUPPORTED)
      return std::string("CHANNEL_LAYOUT_UNSUPPORTED");
    else if (channel_layout == CHANNEL_LAYOUT_MONO)
      return std::string("CHANNEL_LAYOUT_MONO");
    else if (channel_layout == CHANNEL_LAYOUT_STEREO)
      return std::string("CHANNEL_LAYOUT_STEREO");
    else
      return std::string("CHANNEL_LAYOUT_UNSUPPORTED");
  }

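  // Logs all fields of |params| to stdout to simplify manual inspection of
  // the parameters used by the tests.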
  void PrintAudioParameters(AudioParameters params) {
    printf("format           : %s\n", FormatToString(params.format()).c_str());
    printf("channel_layout   : %s\n",
           ChannelLayoutToString(params.channel_layout()).c_str());
    printf("sample_rate      : %d\n", params.sample_rate());
    printf("bits_per_sample  : %d\n", params.bits_per_sample());
    printf("frames_per_buffer: %d\n", params.frames_per_buffer());
    printf("channels         : %d\n", params.channels());
    printf("bytes per buffer : %d\n", params.GetBytesPerBuffer());
    printf("bytes per second : %d\n", params.GetBytesPerSecond());
    printf("bytes per frame  : %d\n", params.GetBytesPerFrame());
    printf("frame size in ms : %.2f\n", TimeBetweenCallbacks(params));
  }

  AudioParameters GetDefaultInputStreamParameters() {
    return audio_manager()->GetInputStreamParameters(
        AudioManagerBase::kDefaultDeviceId);
  }

  AudioParameters GetDefaultOutputStreamParameters() {
    return audio_manager()->GetDefaultOutputStreamParameters();
  }

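  // Returns the expected time between two successive audio callbacks in
  // milliseconds, derived from the buffer size and sample rate in |params|.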
  double TimeBetweenCallbacks(AudioParameters params) const {
    return (base::TimeDelta::FromMicroseconds(
        params.frames_per_buffer() * base::Time::kMicrosecondsPerSecond /
        static_cast<float>(params.sample_rate()))).InMillisecondsF();
  }

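// Helper macro which opens and starts |stream| using |io_callbacks_| as the
// registered callback object, waits until |event| is signaled or the action
// timeout expires, and finally stops and closes the stream.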
#define START_STREAM_AND_WAIT_FOR_EVENT(stream)                  \
  EXPECT_TRUE(stream->Open());                                    \
  stream->Start(&io_callbacks_);                                  \
  EXPECT_TRUE(event.TimedWait(TestTimeouts::action_timeout()));   \
  stream->Stop();                                                 \
  stream->Close()

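  // Creates an audio input stream based on |params|, runs it until roughly
  // one second of callbacks has been delivered and verifies that both the
  // callback count and the time between callbacks match the expected values.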
  void StartInputStreamCallbacks(const AudioParameters& params) {
    double time_between_callbacks_ms = TimeBetweenCallbacks(params);
    const int num_callbacks = (1000.0 / time_between_callbacks_ms);

    base::WaitableEvent event(false, false);
    io_callbacks_.set_input_callback_limit(&event, num_callbacks);

    AudioInputStream* ais = audio_manager()->MakeAudioInputStream(
        params, AudioManagerBase::kDefaultDeviceId);
    EXPECT_TRUE(ais);
    START_STREAM_AND_WAIT_FOR_EVENT(ais);

    EXPECT_GE(io_callbacks_.input_callbacks(), num_callbacks - 1);
    EXPECT_LE(io_callbacks_.input_callbacks(), num_callbacks + 1);
    EXPECT_EQ(io_callbacks_.input_errors(), 0);

    double actual_time_between_callbacks_ms = (
        (io_callbacks_.input_end_time() - io_callbacks_.input_start_time()) /
        (io_callbacks_.input_callbacks() - 1)).InMillisecondsF();
    printf("time between callbacks: %.2fms\n", time_between_callbacks_ms);
    printf("actual time between callbacks: %.2fms\n",
           actual_time_between_callbacks_ms);
    EXPECT_GE(actual_time_between_callbacks_ms,
              0.75 * time_between_callbacks_ms);
    EXPECT_LE(actual_time_between_callbacks_ms,
              1.25 * time_between_callbacks_ms);
  }

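  // Same as StartInputStreamCallbacks() but for an audio output stream: runs
  // the stream for roughly one second and verifies the callback count and the
  // measured time between callbacks.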
  void StartOutputStreamCallbacks(const AudioParameters& params) {
    double time_between_callbacks_ms = TimeBetweenCallbacks(params);
    const int num_callbacks = (1000.0 / time_between_callbacks_ms);

    base::WaitableEvent event(false, false);
    io_callbacks_.set_output_callback_limit(&event, num_callbacks);
    AudioOutputStream* aos = audio_manager()->MakeAudioOutputStream(
        params, std::string());
    EXPECT_TRUE(aos);
    START_STREAM_AND_WAIT_FOR_EVENT(aos);

    EXPECT_GE(io_callbacks_.output_callbacks(), num_callbacks - 1);
    EXPECT_LE(io_callbacks_.output_callbacks(), num_callbacks + 1);
    EXPECT_EQ(io_callbacks_.output_errors(), 0);

    double actual_time_between_callbacks_ms = (
        (io_callbacks_.output_end_time() - io_callbacks_.output_start_time()) /
        (io_callbacks_.output_callbacks() - 1)).InMillisecondsF();
    printf("time between callbacks: %.2fms\n", time_between_callbacks_ms);
    printf("actual time between callbacks: %.2fms\n",
           actual_time_between_callbacks_ms);
    EXPECT_GE(actual_time_between_callbacks_ms,
              0.75 * time_between_callbacks_ms);
    EXPECT_LE(actual_time_between_callbacks_ms,
              1.25 * time_between_callbacks_ms);
  }

#undef START_STREAM_AND_WAIT_FOR_EVENT

 protected:
  base::MessageLoopForUI message_loop_;
  scoped_ptr<AudioManager> audio_manager_;
  MockAudioInputOutputCallbacks io_callbacks_;

  DISALLOW_COPY_AND_ASSIGN(AudioAndroidTest);
};

// Get the default audio input parameters and log the result.
TEST_F(AudioAndroidTest, GetInputStreamParameters) {
  if (!CanRunAudioTests())
    return;
  AudioParameters params = GetDefaultInputStreamParameters();
  EXPECT_TRUE(params.IsValid());
  PrintAudioParameters(params);
}

// Get the default audio output parameters and log the result.
TEST_F(AudioAndroidTest, GetDefaultOutputStreamParameters) {
  if (!CanRunAudioTests())
    return;
  AudioParameters params = GetDefaultOutputStreamParameters();
  EXPECT_TRUE(params.IsValid());
  PrintAudioParameters(params);
}

// Check if low-latency output is supported and log the result.
TEST_F(AudioAndroidTest, IsAudioLowLatencySupported) {
  if (!CanRunAudioTests())
    return;
  AudioManagerAndroid* manager =
      static_cast<AudioManagerAndroid*>(audio_manager());
  bool low_latency = manager->IsAudioLowLatencySupported();
  low_latency ? printf("Low latency output is supported\n") :
                printf("Low latency output is *not* supported\n");
}

// Ensure that a default input stream can be created and closed.
TEST_F(AudioAndroidTest, CreateAndCloseInputStream) {
  if (!CanRunAudioTests())
    return;
  AudioParameters params = GetDefaultInputStreamParameters();
  AudioInputStream* ais = audio_manager()->MakeAudioInputStream(
      params, AudioManagerBase::kDefaultDeviceId);
  EXPECT_TRUE(ais);
  ais->Close();
}

// Ensure that a default output stream can be created and closed.
// TODO(henrika): should we also verify that this API changes the audio mode
// to communication mode, and calls RegisterHeadsetReceiver, the first time
// it is called?
TEST_F(AudioAndroidTest, CreateAndCloseOutputStream) {
  if (!CanRunAudioTests())
    return;
  AudioParameters params = GetDefaultOutputStreamParameters();
  AudioOutputStream* aos = audio_manager()->MakeAudioOutputStream(
      params, std::string());
  EXPECT_TRUE(aos);
  aos->Close();
}

// Ensure that a default input stream can be opened and closed.
TEST_F(AudioAndroidTest, OpenAndCloseInputStream) {
  if (!CanRunAudioTests())
    return;
  AudioParameters params = GetDefaultInputStreamParameters();
  AudioInputStream* ais = audio_manager()->MakeAudioInputStream(
      params, AudioManagerBase::kDefaultDeviceId);
  EXPECT_TRUE(ais);
  EXPECT_TRUE(ais->Open());
  ais->Close();
}

// Ensure that a default output stream can be opened and closed.
TEST_F(AudioAndroidTest, OpenAndCloseOutputStream) {
  if (!CanRunAudioTests())
    return;
  AudioParameters params = GetDefaultOutputStreamParameters();
  AudioOutputStream* aos = audio_manager()->MakeAudioOutputStream(
      params, std::string());
  EXPECT_TRUE(aos);
  EXPECT_TRUE(aos->Open());
  aos->Close();
}

// Start input streaming using default input parameters and ensure that the
// callback sequence is sane.
TEST_F(AudioAndroidTest, StartInputStreamCallbacks) {
  if (!CanRunAudioTests())
    return;
  AudioParameters params = GetDefaultInputStreamParameters();
  StartInputStreamCallbacks(params);
}

// Start input streaming using non-default input parameters and ensure that
// the callback sequence is sane. The only change we make in this test is to
// select a 10ms buffer size instead of the default size.
// TODO(henrika): possibly add support for more variations.
TEST_F(AudioAndroidTest, StartInputStreamCallbacksNonDefaultParameters) {
  if (!CanRunAudioTests())
    return;
  AudioParameters native_params = GetDefaultInputStreamParameters();
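  // Note: sample_rate() / 100 frames per buffer corresponds to 10 ms of
  // audio, e.g. 480 frames at 48 kHz.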
  AudioParameters params(native_params.format(),
                         native_params.channel_layout(),
                         native_params.sample_rate(),
                         native_params.bits_per_sample(),
                         native_params.sample_rate() / 100);
  StartInputStreamCallbacks(params);
}

// Start output streaming using default output parameters and ensure that the
// callback sequence is sane.
TEST_F(AudioAndroidTest, StartOutputStreamCallbacks) {
  if (!CanRunAudioTests())
    return;
  AudioParameters params = GetDefaultOutputStreamParameters();
  StartOutputStreamCallbacks(params);
}

// Start output streaming using non-default output parameters and ensure that
// the callback sequence is sane. The only changes we make in this test are to
// select a 10ms buffer size instead of the default size and to open up the
// device in mono.
// TODO(henrika): possibly add support for more variations.
TEST_F(AudioAndroidTest, StartOutputStreamCallbacksNonDefaultParameters) {
  if (!CanRunAudioTests())
    return;
  AudioParameters native_params = GetDefaultOutputStreamParameters();
  AudioParameters params(native_params.format(),
                         CHANNEL_LAYOUT_MONO,
                         native_params.sample_rate(),
                         native_params.bits_per_sample(),
                         native_params.sample_rate() / 100);
  StartOutputStreamCallbacks(params);
}

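// Play out a raw speech file that matches the sample rate and channel count
// of the default output stream. Correct playback must be verified by
// listening to the rendered audio.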
TEST_F(AudioAndroidTest, RunOutputStreamWithFileAsSource) {
  if (!CanRunAudioTests())
    return;

  AudioParameters params = GetDefaultOutputStreamParameters();
  AudioOutputStream* aos = audio_manager()->MakeAudioOutputStream(
      params, std::string());
  EXPECT_TRUE(aos);

  PrintAudioParameters(params);
  fflush(stdout);

  std::string file_name;
  if (params.sample_rate() == 48000 && params.channels() == 2) {
    file_name = kSpeechFile_16b_s_48k;
  } else if (params.sample_rate() == 48000 && params.channels() == 1) {
    file_name = kSpeechFile_16b_m_48k;
  } else if (params.sample_rate() == 44100 && params.channels() == 2) {
    file_name = kSpeechFile_16b_s_44k;
  } else if (params.sample_rate() == 44100 && params.channels() == 1) {
    file_name = kSpeechFile_16b_m_44k;
  } else {
    FAIL() << "This test supports 44.1kHz and 48kHz mono/stereo only.";
    return;
  }

  base::WaitableEvent event(false, false);
  FileAudioSource source(&event, file_name);

  EXPECT_TRUE(aos->Open());
  aos->SetVolume(1.0);
  aos->Start(&source);
  printf(">> Verify that file is played out correctly");
  fflush(stdout);
  EXPECT_TRUE(event.TimedWait(TestTimeouts::action_max_timeout()));
  printf("\n");
  aos->Stop();
  aos->Close();
}

// Start input streaming and run it for ten seconds while recording to a
// local audio file.
TEST_F(AudioAndroidTest, RunSimplexInputStreamWithFileAsSink) {
  if (!CanRunAudioTests())
    return;

  AudioParameters params = GetDefaultInputStreamParameters();
  AudioInputStream* ais = audio_manager()->MakeAudioInputStream(
      params, AudioManagerBase::kDefaultDeviceId);
  EXPECT_TRUE(ais);

  PrintAudioParameters(params);
  fflush(stdout);

  std::string file_name = base::StringPrintf("out_simplex_%d_%d_%d.pcm",
      params.sample_rate(), params.frames_per_buffer(), params.channels());

  base::WaitableEvent event(false, false);
  FileAudioSink sink(&event, params, file_name);

  EXPECT_TRUE(ais->Open());
  ais->Start(&sink);
  printf(">> Speak into the microphone to record audio");
  fflush(stdout);
  EXPECT_TRUE(event.TimedWait(TestTimeouts::action_max_timeout()));
  printf("\n");
  ais->Stop();
  ais->Close();
}

// Same test as RunSimplexInputStreamWithFileAsSink but this time output
// streaming is active as well (plays out zeros only).
TEST_F(AudioAndroidTest, RunDuplexInputStreamWithFileAsSink) {
  if (!CanRunAudioTests())
    return;

  AudioParameters in_params = GetDefaultInputStreamParameters();
  AudioInputStream* ais = audio_manager()->MakeAudioInputStream(
      in_params, AudioManagerBase::kDefaultDeviceId);
  EXPECT_TRUE(ais);

  PrintAudioParameters(in_params);
  fflush(stdout);

  AudioParameters out_params =
      audio_manager()->GetDefaultOutputStreamParameters();
  AudioOutputStream* aos = audio_manager()->MakeAudioOutputStream(
      out_params, std::string());
  EXPECT_TRUE(aos);

  PrintAudioParameters(out_params);
  fflush(stdout);

  std::string file_name = base::StringPrintf("out_duplex_%d_%d_%d.pcm",
      in_params.sample_rate(), in_params.frames_per_buffer(),
      in_params.channels());

  base::WaitableEvent event(false, false);
  FileAudioSink sink(&event, in_params, file_name);

  EXPECT_TRUE(ais->Open());
  EXPECT_TRUE(aos->Open());
  ais->Start(&sink);
  aos->Start(&io_callbacks_);
  printf(">> Speak into the microphone to record audio");
  fflush(stdout);
  EXPECT_TRUE(event.TimedWait(TestTimeouts::action_max_timeout()));
  printf("\n");
  aos->Stop();
  ais->Stop();
  aos->Close();
  ais->Close();
}

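// Start input and output streams in full duplex mode where recorded samples
// are played out in loopback via a FIFO. Correct operation must be verified
// by listening to the loopback audio while speaking into the microphone.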
TEST_F(AudioAndroidTest, RunInputAndOutputStreamsInFullDuplex) {
  if (!CanRunAudioTests())
    return;

  // Get native audio parameters for the input side.
  AudioParameters default_input_params = GetDefaultInputStreamParameters();

  // Modify the parameters so that both input and output can use the same
  // parameters by selecting 10ms as buffer size. This will also ensure that
  // the output stream will be a mono stream since mono is default for input
  // audio on Android.
  AudioParameters io_params(default_input_params.format(),
                            default_input_params.channel_layout(),
                            default_input_params.sample_rate(),
                            default_input_params.bits_per_sample(),
                            default_input_params.sample_rate() / 100);

  PrintAudioParameters(io_params);
  fflush(stdout);

  // Create input and output streams using the common audio parameters.
  AudioInputStream* ais = audio_manager()->MakeAudioInputStream(
      io_params, AudioManagerBase::kDefaultDeviceId);
  EXPECT_TRUE(ais);
  AudioOutputStream* aos = audio_manager()->MakeAudioOutputStream(
      io_params, std::string());
  EXPECT_TRUE(aos);

  FullDuplexAudioSinkSource full_duplex(io_params);

  // Start a full duplex audio session and print out estimates of the extra
  // delay we should expect from the FIFO. If real-time delay measurements are
  // performed, the result should be reduced by this extra delay since it is
  // something that has been added by the test.
  EXPECT_TRUE(ais->Open());
  EXPECT_TRUE(aos->Open());
  ais->Start(&full_duplex);
  aos->Start(&full_duplex);
  printf(">> Speak into the microphone and listen to the audio in loopback ");
  fflush(stdout);
  base::PlatformThread::Sleep(base::TimeDelta::FromSeconds(30));
  printf("\n");
  aos->Stop();
  ais->Stop();
  aos->Close();
  ais->Close();
}

}  // namespace media