Chromium Code Reviews| Index: media/audio/android/audio_android_unittest.cc |
| diff --git a/media/audio/android/audio_android_unittest.cc b/media/audio/android/audio_android_unittest.cc |
| new file mode 100644 |
| index 0000000000000000000000000000000000000000..3a11ed116146ed082d34f318be8366d8536fdeb2 |
| --- /dev/null |
| +++ b/media/audio/android/audio_android_unittest.cc |
| @@ -0,0 +1,874 @@ |
| +// Copyright (c) 2013 The Chromium Authors. All rights reserved. |
| +// Use of this source code is governed by a BSD-style license that can be |
| +// found in the LICENSE file. |
| + |
| +#include "base/basictypes.h" |
| +#include "base/file_util.h" |
| +#include "base/memory/scoped_ptr.h" |
| +#include "base/message_loop/message_loop.h" |
| +#include "base/path_service.h" |
| +#include "base/strings/stringprintf.h" |
| +#include "base/synchronization/lock.h" |
| +#include "base/synchronization/waitable_event.h" |
| +#include "base/test/test_timeouts.h" |
| +#include "base/time/time.h" |
| +#include "build/build_config.h" |
| +#include "media/audio/android/audio_manager_android.h" |
| +#include "media/audio/audio_io.h" |
| +#include "media/audio/audio_manager_base.h" |
| +#include "media/base/decoder_buffer.h" |
| +#include "media/base/seekable_buffer.h" |
| +#include "media/base/test_data_util.h" |
| +#include "testing/gtest/include/gtest/gtest.h" |
| + |
| +namespace media { |
| + |
// Raw PCM speech files read from the media/test/data directory via
// ReadTestDataFile(). Mono and stereo variants are provided at both
// 44.1 kHz and 48 kHz so a file matching the device's native output
// parameters can be selected at runtime.
static const char kSpeechFile_16b_s_48k[] = "speech_16b_stereo_48kHz.raw";
static const char kSpeechFile_16b_m_48k[] = "speech_16b_mono_48kHz.raw";
static const char kSpeechFile_16b_s_44k[] = "speech_16b_stereo_44kHz.raw";
static const char kSpeechFile_16b_m_44k[] = "speech_16b_mono_44kHz.raw";

// All test files above contain 16-bit samples.
static const int kBitsPerSample = 16;
static const int kBytesPerSample = kBitsPerSample / 8;
| + |
| +// Implements AudioInputCallback and AudioSourceCallback with some trivial |
| +// additional counting support to keep track of the number of callbacks, |
| +// number or error callbacks etc. It also allows the user to set an expected |
| +// number of callbacks, in any direction, before a provided event is signaled. |
| +class MockAudioInputOutputCallbacks |
|
DaleCurtis
2013/09/05 20:35:23
This seems like it could all be done much more cle
henrika (OOO until Aug 14)
2013/09/06 15:59:51
I feel that this approach gives me more flexibilit
DaleCurtis
2013/09/06 22:05:07
By choosing to manual mock objects you're increasi
|
| + : public AudioInputStream::AudioInputCallback, |
| + public AudioOutputStream::AudioSourceCallback { |
| + public: |
| + MockAudioInputOutputCallbacks() { |
| + Reset(); |
| + }; |
| + virtual ~MockAudioInputOutputCallbacks() {}; |
| + |
| + // Implementation of AudioInputCallback. |
| + virtual void OnData(AudioInputStream* stream, const uint8* src, |
| + uint32 size, uint32 hardware_delay_bytes, |
| + double volume) OVERRIDE { |
| + UpdateCountersAndSignalWhenDone(kInput); |
| + }; |
| + |
| + virtual void OnError(AudioInputStream* stream) OVERRIDE { |
| + errors_[kInput]++; |
| + } |
| + |
| + virtual void OnClose(AudioInputStream* stream) OVERRIDE {} |
| + |
| + // Implementation of AudioSourceCallback. |
| + virtual int OnMoreData(AudioBus* dest, |
| + AudioBuffersState buffers_state) OVERRIDE { |
| + UpdateCountersAndSignalWhenDone(kOutput); |
| + dest->Zero(); |
| + return dest->frames(); |
| + } |
| + |
| + virtual int OnMoreIOData(AudioBus* source, |
| + AudioBus* dest, |
| + AudioBuffersState buffers_state) OVERRIDE { |
| + NOTREACHED(); |
| + return 0; |
| + } |
| + |
| + virtual void OnError(AudioOutputStream* stream) OVERRIDE { |
| + errors_[kOutput]++; |
| + } |
| + |
| + void Reset() { |
| + for (int i = 0; i < 2; ++i) { |
| + callbacks_[i] = 0; |
| + callback_limit_[i] = -1; |
| + errors_[i] = 0; |
| + } |
| + } |
| + |
| + int input_callbacks() { return callbacks_[kInput]; } |
| + |
| + void set_input_callback_limit(base::WaitableEvent* event, |
| + int input_callback_limit) { |
| + event_[kInput] = event; |
| + callback_limit_[kInput] = input_callback_limit; |
| + } |
| + |
| + int input_errors() { return errors_[kInput]; } |
| + |
| + base::TimeTicks input_start_time() { return start_time_[kInput]; } |
| + |
| + base::TimeTicks input_end_time() { return end_time_[kInput]; } |
| + |
| + int output_callbacks() { return callbacks_[kOutput]; } |
| + |
| + void set_output_callback_limit(base::WaitableEvent* event, |
| + int output_callback_limit) { |
| + event_[kOutput] = event; |
| + callback_limit_[kOutput] = output_callback_limit; |
| + } |
| + |
| + int output_errors() { return errors_[kOutput]; } |
| + |
| + base::TimeTicks output_start_time() { return start_time_[kOutput]; } |
| + |
| + base::TimeTicks output_end_time() { return end_time_[kOutput]; } |
| + |
| + double average_time_between_input_callbacks_ms() { |
| + return ((input_end_time() - input_start_time()) / |
| + (input_callbacks() - 1)).InMillisecondsF(); |
| + } |
| + |
| + double average_time_between_output_callbacks_ms() { |
| + return ((output_end_time() - output_start_time()) / |
| + (output_callbacks() - 1)).InMillisecondsF(); |
| + } |
| + |
| + private: |
| + void UpdateCountersAndSignalWhenDone(int dir) { |
| + if (callbacks_[dir] == 0) |
| + start_time_[dir] = base::TimeTicks::Now(); |
| + callbacks_[dir]++; |
| + if (callback_limit_[dir] > 0 && |
| + callbacks_[dir] == callback_limit_[dir]) { |
| + end_time_[dir] = base::TimeTicks::Now(); |
| + event_[dir]->Signal(); |
| + } |
| + } |
| + |
| + enum { |
| + kInput = 0, |
| + kOutput = 1 |
| + }; |
| + |
| + int callbacks_[2]; |
| + int callback_limit_[2]; |
| + int errors_[2]; |
| + base::TimeTicks start_time_[2]; |
| + base::TimeTicks end_time_[2]; |
| + base::WaitableEvent* event_[2]; |
| + |
| + DISALLOW_COPY_AND_ASSIGN(MockAudioInputOutputCallbacks); |
| +}; |
| + |
| +// Implements AudioOutputStream::AudioSourceCallback and provides audio data |
| +// by reading from a data file. |
| +class FileAudioSource : public AudioOutputStream::AudioSourceCallback { |
| + public: |
| + explicit FileAudioSource(base::WaitableEvent* event, const std::string& name) |
| + : event_(event), |
| + pos_(0), |
| + previous_marker_time_(base::TimeTicks::Now()) { |
| + // Reads a test file from media/test/data directory and stores it in |
|
DaleCurtis
2013/09/05 20:35:23
Indent it way off. I'd run clang-format on the fil
henrika (OOO until Aug 14)
2013/09/06 15:59:51
Thanks. Did it on the complete CL. Thanks for the
|
| + // a DecoderBuffer. |
| + file_ = ReadTestDataFile(name); |
| + |
| + // Log the name of the file which is used as input for this test. |
| + base::FilePath file_path = GetTestDataFilePath(name); |
| + printf("Reading from file: %s\n", file_path.value().c_str()); |
|
DaleCurtis
2013/09/05 20:35:23
Generally we avoid visible log messages in unittes
henrika (OOO until Aug 14)
2013/09/06 15:59:51
I will remove it.
|
| + fflush(stdout); |
| + } |
| + |
| + virtual ~FileAudioSource() {} |
| + |
| + // AudioOutputStream::AudioSourceCallback implementation. |
| + |
| + // Use samples read from a data file and fill up the audio buffer |
| + // provided to us in the callback. |
| + virtual int OnMoreData(AudioBus* audio_bus, |
| + AudioBuffersState buffers_state) OVERRIDE { |
| + // Add a '.'-marker once every second. |
| + const base::TimeTicks now_time = base::TimeTicks::Now(); |
| + const int diff = (now_time - previous_marker_time_).InMilliseconds(); |
| + if (diff > 1000) { |
| + printf("."); |
|
DaleCurtis
2013/09/05 20:35:23
Ditto.
|
| + fflush(stdout); |
| + previous_marker_time_ = now_time; |
| + } |
| + |
| + bool stop_playing = false; |
| + int max_size = |
| + audio_bus->frames() * audio_bus->channels() * kBytesPerSample; |
| + |
| + // Adjust data size and prepare for end signal if file has ended. |
| + if (pos_ + max_size > file_size()) { |
| + stop_playing = true; |
| + max_size = file_size() - pos_; |
| + } |
| + |
| + // File data is stored as interleaved 16-bit values. Copy data samples from |
| + // the file and deinterleave to match the audio bus format. |
| + // FromInterleaved() will zero out any unfilled frames when there is not |
| + // sufficient data remaining in the file to fill up the complete frame. |
| + int frames = max_size / (audio_bus->channels() * kBytesPerSample); |
| + if (max_size) { |
| + audio_bus->FromInterleaved( |
| + file_->data() + pos_, frames, kBytesPerSample); |
| + pos_ += max_size; |
| + } |
| + |
| + // Set event to ensure that the test can stop when the file has ended. |
| + if (stop_playing) |
| + event_->Signal(); |
| + |
| + return frames; |
| + } |
| + |
| + virtual int OnMoreIOData(AudioBus* source, |
| + AudioBus* dest, |
| + AudioBuffersState buffers_state) OVERRIDE { |
| + NOTREACHED(); |
| + return 0; |
| + } |
| + |
| + virtual void OnError(AudioOutputStream* stream) OVERRIDE {} |
| + |
| + int file_size() { return file_->data_size(); } |
| + |
| + private: |
| + base::WaitableEvent* event_; |
| + int pos_; |
| + scoped_refptr<DecoderBuffer> file_; |
| + base::TimeTicks previous_marker_time_; |
| + |
| + DISALLOW_COPY_AND_ASSIGN(FileAudioSource); |
| +}; |
| + |
| +// Implements AudioInputStream::AudioInputCallback and writes the recorded |
| +// audio data to a local output file. |
| +class FileAudioSink : public AudioInputStream::AudioInputCallback { |
| + public: |
| + explicit FileAudioSink(base::WaitableEvent* event, |
| + const AudioParameters& params, |
| + const std::string& file_name) |
| + : event_(event), |
| + params_(params), |
| + previous_marker_time_(base::TimeTicks::Now()) { |
| + // Allocate space for ~10 seconds of data. |
| + const int kMaxBufferSize = 10 * params.GetBytesPerSecond(); |
| + buffer_.reset(new media::SeekableBuffer(0, kMaxBufferSize)); |
| + |
| + // Open up the binary file which will be written to in the destructor. |
| + base::FilePath file_path; |
| + EXPECT_TRUE(PathService::Get(base::DIR_SOURCE_ROOT, &file_path)); |
| + file_path = file_path.AppendASCII(file_name.c_str()); |
| + binary_file_ = file_util::OpenFile(file_path, "wb"); |
| + DLOG_IF(ERROR, !binary_file_) << "Failed to open binary PCM data file."; |
| + printf("Writing to file : %s ", file_path.value().c_str()); |
| + printf("of size %d bytes\n", buffer_->forward_capacity()); |
| + fflush(stdout); |
| + } |
| + |
| + virtual ~FileAudioSink() { |
| + int bytes_written = 0; |
| + while (bytes_written < buffer_->forward_capacity()) { |
| + const uint8* chunk; |
| + int chunk_size; |
| + |
| + // Stop writing if no more data is available. |
| + if (!buffer_->GetCurrentChunk(&chunk, &chunk_size)) |
| + break; |
| + |
| + // Write recorded data chunk to the file and prepare for next chunk. |
| + fwrite(chunk, 1, chunk_size, binary_file_); |
| + buffer_->Seek(chunk_size); |
| + bytes_written += chunk_size; |
| + } |
| + file_util::CloseFile(binary_file_); |
| + } |
| + |
| + // AudioInputStream::AudioInputCallback implementation. |
| + virtual void OnData(AudioInputStream* stream, |
| + const uint8* src, |
| + uint32 size, |
| + uint32 hardware_delay_bytes, |
| + double volume) OVERRIDE { |
| + // Add a '.'-marker once every second. |
| + const base::TimeTicks now_time = base::TimeTicks::Now(); |
| + const int diff = (now_time - previous_marker_time_).InMilliseconds(); |
| + if (diff > 1000) { |
| + printf("."); |
| + fflush(stdout); |
| + previous_marker_time_ = now_time; |
| + } |
| + |
| + // Store data data in a temporary buffer to avoid making blocking |
| + // fwrite() calls in the audio callback. The complete buffer will be |
| + // written to file in the destructor. |
| + if (!buffer_->Append(src, size)) |
| + event_->Signal(); |
| + } |
| + |
| + virtual void OnClose(AudioInputStream* stream) OVERRIDE {} |
| + virtual void OnError(AudioInputStream* stream) OVERRIDE {} |
| + |
| + private: |
| + base::WaitableEvent* event_; |
| + AudioParameters params_; |
| + scoped_ptr<media::SeekableBuffer> buffer_; |
| + FILE* binary_file_; |
| + base::TimeTicks previous_marker_time_; |
| + |
| + DISALLOW_COPY_AND_ASSIGN(FileAudioSink); |
| +}; |
| + |
| +// Implements AudioInputCallback and AudioSourceCallback to support full |
| +// duplex audio where captured samples are played out in loopback after |
| +// reading from a temporary FIFO storage. |
| +class FullDuplexAudioSinkSource |
| + : public AudioInputStream::AudioInputCallback, |
| + public AudioOutputStream::AudioSourceCallback { |
| + public: |
| + explicit FullDuplexAudioSinkSource(const AudioParameters& params) |
| + : params_(params), |
| + previous_marker_time_(base::TimeTicks::Now()), |
| + started_(false) { |
| + // Start with a reasonably small FIFO size. It will be increased |
| + // dynamically during the test if required. |
| + fifo_.reset( |
| + new media::SeekableBuffer(0, 2 * params.GetBytesPerBuffer())); |
| + buffer_.reset(new uint8[params_.GetBytesPerBuffer()]); |
| + } |
| + |
| + virtual ~FullDuplexAudioSinkSource() {} |
| + |
| + // AudioInputStream::AudioInputCallback implementation |
| + virtual void OnData(AudioInputStream* stream, const uint8* src, |
| + uint32 size, uint32 hardware_delay_bytes, |
| + double volume) OVERRIDE { |
| + // Add a '.'-marker once every second. |
| + const base::TimeTicks now_time = base::TimeTicks::Now(); |
| + const int diff = (now_time - previous_marker_time_).InMilliseconds(); |
| + |
| + base::AutoLock lock(lock_); |
| + if (diff > 1000) { |
| + started_ = true; |
| + previous_marker_time_ = now_time; |
| + |
| + // Print out the extra delay added by the FIFO. This is a best effort |
| + // estimate. We might be +- 10ms off here. |
| + int extra_fio_delay = static_cast<int>( |
| + BytesToMilliseconds(fifo_->forward_bytes() + size)); |
| + printf("%d ", extra_fio_delay); |
| + fflush(stdout); |
| + } |
| + |
| + // We add an initial delay of ~1 second before loopback starts to ensure |
| + // a stable callback sequence and to avoid initial bursts which might add |
| + // to the extra FIFO delay. |
| + if (!started_) |
| + return; |
| + |
| + // Append new data to the FIFO and extend the size if the mac capacity |
| + // was exceeded. Flush the FIFO if is extended just in case. |
| + if (!fifo_->Append(src, size)) { |
| + fifo_->set_forward_capacity(2 * fifo_->forward_capacity()); |
| + printf("+ "); |
| + fflush(stdout); |
| + fifo_->Clear(); |
| + } |
| + } |
| + |
| + virtual void OnClose(AudioInputStream* stream) OVERRIDE {} |
| + virtual void OnError(AudioInputStream* stream) OVERRIDE {} |
| + |
| + // AudioOutputStream::AudioSourceCallback implementation |
| + virtual int OnMoreData(AudioBus* dest, |
| + AudioBuffersState buffers_state) OVERRIDE { |
| + const int size_in_bytes = |
| + (params_.bits_per_sample() / 8) * dest->frames() * dest->channels(); |
| + EXPECT_EQ(size_in_bytes, params_.GetBytesPerBuffer()); |
| + |
| + base::AutoLock lock(lock_); |
| + |
| + // We add an initial delay of ~1 second before loopback starts to ensure |
| + // a stable callback sequences and to avoid initial bursts which might add |
| + // to the extra FIFO delay. |
| + if (!started_) { |
| + dest->Zero(); |
| + return dest->frames(); |
| + } |
| + |
| + // Fill up destination with zeros if the FIFO does not contain enough |
| + // data to fulfill the request. |
| + if (fifo_->forward_bytes() < size_in_bytes) { |
| + dest->Zero(); |
| + } else { |
| + fifo_->Read(buffer_.get(), size_in_bytes); |
| + dest->FromInterleaved( |
| + buffer_.get(), dest->frames(), params_.bits_per_sample() / 8); |
| + } |
| + |
| + return dest->frames(); |
| + } |
| + |
| + virtual int OnMoreIOData(AudioBus* source, |
| + AudioBus* dest, |
| + AudioBuffersState buffers_state) OVERRIDE { |
| + NOTREACHED(); |
| + return 0; |
| + } |
| + |
| + virtual void OnError(AudioOutputStream* stream) OVERRIDE {} |
| + |
| + private: |
| + // Converts from bytes to milliseconds given number of bytes and existing |
| + // audio parameters. |
| + double BytesToMilliseconds(int bytes) const { |
| + const int frames = bytes / params_.GetBytesPerFrame(); |
| + return (base::TimeDelta::FromMicroseconds( |
| + frames * base::Time::kMicrosecondsPerSecond / |
| + static_cast<float>(params_.sample_rate()))).InMillisecondsF(); |
| + } |
| + |
| + AudioParameters params_; |
| + base::TimeTicks previous_marker_time_; |
| + base::Lock lock_; |
| + scoped_ptr<media::SeekableBuffer> fifo_; |
| + scoped_ptr<uint8[]> buffer_; |
| + bool started_; |
| + |
| + DISALLOW_COPY_AND_ASSIGN(FullDuplexAudioSinkSource); |
| +}; |
| + |
| +// Test fixture class. |
| +class AudioAndroidTest : public testing::Test { |
| + public: |
| + AudioAndroidTest() |
| + : audio_manager_(AudioManager::Create()) {} |
| + |
| + virtual ~AudioAndroidTest() {} |
| + |
| + AudioManager* audio_manager() { return audio_manager_.get(); } |
| + |
| + // Converts AudioParameters::Format enumerator to readable string. |
| + std::string FormatToString(AudioParameters::Format format) { |
| + switch (format) { |
| + case AudioParameters::AUDIO_PCM_LINEAR: |
| + return std::string("AUDIO_PCM_LINEAR"); |
| + case AudioParameters::AUDIO_PCM_LOW_LATENCY: |
| + return std::string("AUDIO_PCM_LOW_LATENCY"); |
| + case AudioParameters::AUDIO_FAKE: |
| + return std::string("AUDIO_FAKE"); |
| + case AudioParameters::AUDIO_LAST_FORMAT: |
| + return std::string("AUDIO_LAST_FORMAT"); |
| + default: |
| + return std::string(); |
| + } |
| + } |
| + |
| + // Converts ChannelLayout enumerator to readable string. Does not include |
| + // multi-channel cases since these layouts are not supported on Android. |
| + std::string ChannelLayoutToString(ChannelLayout channel_layout) { |
| + switch (channel_layout) { |
| + case CHANNEL_LAYOUT_NONE: |
| + return std::string("CHANNEL_LAYOUT_NONE"); |
| + case CHANNEL_LAYOUT_UNSUPPORTED: |
| + return std::string("CHANNEL_LAYOUT_UNSUPPORTED"); |
| + case CHANNEL_LAYOUT_MONO: |
| + return std::string("CHANNEL_LAYOUT_MONO"); |
| + case CHANNEL_LAYOUT_STEREO: |
| + return std::string("CHANNEL_LAYOUT_STEREO"); |
| + default: |
| + return std::string("CHANNEL_LAYOUT_UNSUPPORTED"); |
| + } |
| + } |
| + |
| + void PrintAudioParameters(AudioParameters params) { |
| + printf("format : %s\n", FormatToString(params.format()).c_str()); |
| + printf("channel_layout : %s\n", |
| + ChannelLayoutToString(params.channel_layout()).c_str()); |
| + printf("sample_rate : %d\n", params.sample_rate()); |
| + printf("bits_per_sample : %d\n", params.bits_per_sample()); |
| + printf("frames_per_buffer: %d\n", params.frames_per_buffer()); |
| + printf("channels : %d\n", params.channels()); |
| + printf("bytes per buffer : %d\n", params.GetBytesPerBuffer()); |
| + printf("bytes per second : %d\n", params.GetBytesPerSecond()); |
| + printf("bytes per frame : %d\n", params.GetBytesPerFrame()); |
| + printf("frame size in ms : %.2f\n", ExpectedTimeBetweenCallbacks(params)); |
| + } |
| + |
| + AudioParameters GetDefaultInputStreamParameters() { |
| + return audio_manager()->GetInputStreamParameters( |
| + AudioManagerBase::kDefaultDeviceId); |
| + } |
| + |
| + AudioParameters GetDefaultOutputStreamParameters() { |
| + return audio_manager()->GetDefaultOutputStreamParameters(); |
| + } |
| + |
| + double ExpectedTimeBetweenCallbacks(AudioParameters params) const { |
| + return (base::TimeDelta::FromMicroseconds( |
| + params.frames_per_buffer() * base::Time::kMicrosecondsPerSecond / |
| + static_cast<float>(params.sample_rate()))).InMillisecondsF(); |
| + } |
| + |
| + #define START_STREAM_AND_WAIT_FOR_EVENT(stream, dir) \ |
|
DaleCurtis
2013/09/05 20:35:23
Seems you could just make this a templated functio
henrika (OOO until Aug 14)
2013/09/06 15:59:51
Guess I could also have done it as a separate func
DaleCurtis
2013/09/06 22:05:07
Chrome is a C++ based project, so we try to avoid
tommi (sloooow) - chröme
2013/09/08 18:53:42
Agree. As an additional thing to think about, the
|
| + base::WaitableEvent event(false, false); \ |
| + io_callbacks_.set_ ## dir ## _callback_limit(&event, num_callbacks); \ |
| + EXPECT_TRUE(stream->Open()); \ |
| + stream->Start(&io_callbacks_); \ |
| + EXPECT_TRUE(event.TimedWait(TestTimeouts::action_timeout())); \ |
| + stream->Stop(); \ |
| + stream->Close(); \ |
| + EXPECT_GE(io_callbacks_.dir ## _callbacks(), num_callbacks); \ |
| + EXPECT_LE(io_callbacks_.dir ## _callbacks(), num_callbacks + 1); \ |
| + EXPECT_EQ(io_callbacks_.dir ## _errors(), 0); \ |
| + printf("expected time between callbacks: %.2fms\n", \ |
| + time_between_callbacks_ms); \ |
| + double actual_time_between_callbacks_ms = \ |
| + io_callbacks_.average_time_between_ ## dir ## _callbacks_ms(); \ |
| + printf("actual time between callbacks: %.2fms\n", \ |
| + actual_time_between_callbacks_ms); \ |
| + EXPECT_GE(actual_time_between_callbacks_ms, \ |
| + 0.70 * time_between_callbacks_ms); \ |
| + EXPECT_LE(actual_time_between_callbacks_ms, \ |
| + 1.30 * time_between_callbacks_ms) \ |
| + |
| + void StartInputStreamCallbacks(const AudioParameters& params) { |
| + double time_between_callbacks_ms = ExpectedTimeBetweenCallbacks(params); |
| + const int num_callbacks = (2000.0 / time_between_callbacks_ms); |
| + AudioInputStream* ais = audio_manager()->MakeAudioInputStream( |
| + params, AudioManagerBase::kDefaultDeviceId); |
| + EXPECT_TRUE(ais); |
| + START_STREAM_AND_WAIT_FOR_EVENT(ais, input); |
| + } |
| + |
| + void StartOutputStreamCallbacks(const AudioParameters& params) { |
| + double time_between_callbacks_ms = ExpectedTimeBetweenCallbacks(params); |
| + const int num_callbacks = (2000.0 / time_between_callbacks_ms); |
| + AudioOutputStream* aos = audio_manager()->MakeAudioOutputStream( |
| + params, std::string(), std::string()); |
| + EXPECT_TRUE(aos); |
| + START_STREAM_AND_WAIT_FOR_EVENT(aos, output); |
| + } |
| + |
| + #undef START_STREAM_AND_WAIT_FOR_EVENT |
| + |
| + #define MULTIPLE_START_STREAM_AND_WAIT_FOR_EVENT(stream, dir) \ |
|
DaleCurtis
2013/09/05 20:35:23
Ditto.
henrika (OOO until Aug 14)
2013/09/06 15:59:51
Now removed.
|
| + const int kNumCallbacks = 5; \ |
| + const int kNumIterations = 3; \ |
| + base::WaitableEvent event(false, false); \ |
| + EXPECT_TRUE(stream->Open()); \ |
| + for (int i = 0; i < kNumIterations; ++i) { \ |
| + io_callbacks_.Reset(); \ |
| + io_callbacks_.set_ ## dir ## _callback_limit(&event, kNumCallbacks); \ |
| + stream->Start(&io_callbacks_); \ |
| + EXPECT_TRUE(event.TimedWait(TestTimeouts::action_timeout())); \ |
| + stream->Stop(); \ |
| + EXPECT_EQ(io_callbacks_.dir ## _errors(), 0); \ |
| + EXPECT_GE(io_callbacks_.dir ## _callbacks(), kNumCallbacks); \ |
| + EXPECT_LE(io_callbacks_.dir ## _callbacks(), kNumCallbacks + 1); \ |
| + } \ |
| + stream->Close() \ |
| + |
| + void MultipleStartStopInputStreamCallbacks(const AudioParameters& params) { |
| + AudioInputStream* ais = audio_manager()->MakeAudioInputStream( |
| + params, AudioManagerBase::kDefaultDeviceId); |
| + EXPECT_TRUE(ais); |
| + MULTIPLE_START_STREAM_AND_WAIT_FOR_EVENT(ais, input); |
| + } |
| + |
| + void MultipleStartStopOutputStreamCallbacks(const AudioParameters& params) { |
| + AudioOutputStream* aos = audio_manager()->MakeAudioOutputStream( |
| + params, std::string(), std::string()); |
| + EXPECT_TRUE(aos); |
| + MULTIPLE_START_STREAM_AND_WAIT_FOR_EVENT(aos, output); |
| + } |
| + |
| + #undef MULTIPLE_START_STREAM_AND_WAIT_FOR_EVENT |
| + |
| + protected: |
| + base::MessageLoopForUI message_loop_; |
| + scoped_ptr<AudioManager> audio_manager_; |
| + MockAudioInputOutputCallbacks io_callbacks_; |
| + |
| + DISALLOW_COPY_AND_ASSIGN(AudioAndroidTest); |
| +}; |
| + |
| +// Get the default audio input parameters and log the result. |
| +TEST_F(AudioAndroidTest, GetInputStreamParameters) { |
| + AudioParameters params = GetDefaultInputStreamParameters(); |
| + EXPECT_TRUE(params.IsValid()); |
| + PrintAudioParameters(params); |
| +} |
| + |
| +// Get the default audio output parameters and log the result. |
| +TEST_F(AudioAndroidTest, GetDefaultOutputStreamParameters) { |
| + AudioParameters params = GetDefaultOutputStreamParameters(); |
| + EXPECT_TRUE(params.IsValid()); |
| + PrintAudioParameters(params); |
| +} |
| + |
| +// Check if low-latency output is supported and log the result as output. |
| +TEST_F(AudioAndroidTest, IsAudioLowLatencySupported) { |
| + AudioManagerAndroid* manager = |
| + static_cast<AudioManagerAndroid*>(audio_manager()); |
| + bool low_latency = manager->IsAudioLowLatencySupported(); |
| + low_latency ? printf("Low latency output is supported\n") : |
|
DaleCurtis
2013/09/05 20:35:23
Weird style and use of printf. Why not just if (x
henrika (OOO until Aug 14)
2013/09/06 15:59:51
Done.
|
| + printf("Low latency output is *not* supported\n"); |
| +} |
| + |
| +// Ensure that a default input stream can be created and closed. |
| +TEST_F(AudioAndroidTest, CreateAndCloseInputStream) { |
| + AudioParameters params = GetDefaultInputStreamParameters(); |
| + AudioInputStream* ais = audio_manager()->MakeAudioInputStream( |
| + params, AudioManagerBase::kDefaultDeviceId); |
| + EXPECT_TRUE(ais); |
| + ais->Close(); |
| +} |
| + |
| +// Ensure that a default output stream can be created and closed. |
| +// TODO(henrika): should we also verify that this API changes the audio mode |
| +// to communication mode, and calls RegisterHeadsetReceiver, the first time |
| +// it is called? |
| +TEST_F(AudioAndroidTest, CreateAndCloseOutputStream) { |
| + AudioParameters params = GetDefaultOutputStreamParameters(); |
| + AudioOutputStream* aos = audio_manager()->MakeAudioOutputStream( |
| + params, std::string(), std::string()); |
| + EXPECT_TRUE(aos); |
| + aos->Close(); |
| +} |
| + |
| +// Ensure that a default input stream can be opened and closed. |
| +TEST_F(AudioAndroidTest, OpenAndCloseInputStream) { |
| + AudioParameters params = GetDefaultInputStreamParameters(); |
| + AudioInputStream* ais = audio_manager()->MakeAudioInputStream( |
| + params, AudioManagerBase::kDefaultDeviceId); |
| + EXPECT_TRUE(ais); |
| + EXPECT_TRUE(ais->Open()); |
| + ais->Close(); |
| +} |
| + |
| +// Ensure that a default output stream can be opened and closed. |
| +TEST_F(AudioAndroidTest, OpenAndCloseOutputStream) { |
| + AudioParameters params = GetDefaultOutputStreamParameters(); |
| + AudioOutputStream* aos = audio_manager()->MakeAudioOutputStream( |
| + params, std::string(), std::string()); |
| + EXPECT_TRUE(aos); |
| + EXPECT_TRUE(aos->Open()); |
| + aos->Close(); |
| +} |
| + |
| +// Start input streaming using default input parameters and ensure that the |
| +// callback sequence is sane. |
| +TEST_F(AudioAndroidTest, StartInputStreamCallbacks) { |
| + AudioParameters params = GetDefaultInputStreamParameters(); |
| + StartInputStreamCallbacks(params); |
| +} |
| + |
| +// Start input streaming using non default input parameters and ensure that the |
| +// callback sequence is sane. The only change we make in this test is to select |
| +// a 10ms buffer size instead of the default size. |
| +// TODO(henrika): possibly add support for more variations. |
| +TEST_F(AudioAndroidTest, StartInputStreamCallbacksNonDefaultParameters) { |
| + AudioParameters native_params = GetDefaultInputStreamParameters(); |
| + AudioParameters params(native_params.format(), |
| + native_params.channel_layout(), |
| + native_params.sample_rate(), |
| + native_params.bits_per_sample(), |
| + native_params.sample_rate() / 100); |
| + StartInputStreamCallbacks(params); |
| +} |
| + |
| +// Do repeated Start/Stop calling sequences and verify that we are able to |
| +// restart recording multiple times. |
| +TEST_F(AudioAndroidTest, MultipleStartStopInputStreamCallbacks) { |
| + AudioParameters params = GetDefaultInputStreamParameters(); |
| + MultipleStartStopInputStreamCallbacks(params); |
| +} |
| + |
| +// Do repeated Start/Stop calling sequences and verify that we are able to |
| +// restart playout multiple times. |
| +TEST_F(AudioAndroidTest, MultipleStartStopOutputStreamCallbacks) { |
| + AudioParameters params = GetDefaultOutputStreamParameters(); |
| + MultipleStartStopOutputStreamCallbacks(params); |
| +} |
| + |
| +// Start output streaming using default output parameters and ensure that the |
| +// callback sequence is sane. |
| +TEST_F(AudioAndroidTest, StartOutputStreamCallbacks) { |
| + AudioParameters params = GetDefaultOutputStreamParameters(); |
| + StartOutputStreamCallbacks(params); |
| +} |
| + |
| +// Start output streaming using non default output parameters and ensure that |
| +// the callback sequence is sane. The only changed we make in this test is to |
| +// select a 10ms buffer size instead of the default size and to open up the |
| +// device in mono. |
| +// TODO(henrika): possibly add support for more variations. |
| +TEST_F(AudioAndroidTest, StartOutputStreamCallbacksNonDefaultParameters) { |
| + AudioParameters native_params = GetDefaultOutputStreamParameters(); |
| + AudioParameters params(native_params.format(), |
| + CHANNEL_LAYOUT_MONO, |
| + native_params.sample_rate(), |
| + native_params.bits_per_sample(), |
| + native_params.sample_rate() / 100); |
| + StartOutputStreamCallbacks(params); |
| +} |
| + |
// Play out a PCM file segment in real time and allow the user to verify that
// the rendered audio sounds OK.
// NOTE: this test requires user interaction and is not designed to run as an
// automatized test on bots.
TEST_F(AudioAndroidTest, DISABLED_RunOutputStreamWithFileAsSource) {
  AudioParameters params = GetDefaultOutputStreamParameters();
  AudioOutputStream* aos = audio_manager()->MakeAudioOutputStream(
      params, std::string(), std::string());
  EXPECT_TRUE(aos);

  PrintAudioParameters(params);
  fflush(stdout);

  // Select the raw speech file which matches the native sample rate and
  // channel count; only 44.1/48 kHz mono/stereo combinations are provided.
  std::string file_name;
  if (params.sample_rate() == 48000 && params.channels() == 2) {
    file_name = kSpeechFile_16b_s_48k;
  } else if (params.sample_rate() == 48000 && params.channels() == 1) {
    file_name = kSpeechFile_16b_m_48k;
  } else if (params.sample_rate() == 44100 && params.channels() == 2) {
    file_name = kSpeechFile_16b_s_44k;
  } else if (params.sample_rate() == 44100 && params.channels() == 1) {
    file_name = kSpeechFile_16b_m_44k;
  } else {
    FAIL() << "This test supports 44.1kHz and 48kHz mono/stereo only.";
    return;
  }

  base::WaitableEvent event(false, false);
  FileAudioSource source(&event, file_name);

  EXPECT_TRUE(aos->Open());
  aos->SetVolume(1.0);
  aos->Start(&source);
  printf(">> Verify that file is played out correctly");
  fflush(stdout);
  // FileAudioSource signals |event| when the end of the file is reached.
  EXPECT_TRUE(event.TimedWait(TestTimeouts::action_max_timeout()));
  printf("\n");
  aos->Stop();
  aos->Close();
}
| + |
// Start input streaming and run it for ten seconds while recording to a
// local audio file.
// NOTE: this test requires user interaction and is not designed to run as an
// automatized test on bots.
TEST_F(AudioAndroidTest, DISABLED_RunSimplexInputStreamWithFileAsSink) {
  AudioParameters params = GetDefaultInputStreamParameters();
  AudioInputStream* ais = audio_manager()->MakeAudioInputStream(
      params, AudioManagerBase::kDefaultDeviceId);
  EXPECT_TRUE(ais);

  PrintAudioParameters(params);
  fflush(stdout);

  // Encode the active parameters in the output file name:
  // out_simplex_<sample rate>_<frames per buffer>_<channels>.pcm.
  std::string file_name = base::StringPrintf("out_simplex_%d_%d_%d.pcm",
      params.sample_rate(), params.frames_per_buffer(), params.channels());

  base::WaitableEvent event(false, false);
  FileAudioSink sink(&event, params, file_name);

  EXPECT_TRUE(ais->Open());
  ais->Start(&sink);
  printf(">> Speak into the microphone to record audio");
  fflush(stdout);
  // FileAudioSink signals |event| when its internal (~10 second) buffer
  // has been filled.
  EXPECT_TRUE(event.TimedWait(TestTimeouts::action_max_timeout()));
  printf("\n");
  ais->Stop();
  ais->Close();
}
| + |
// Same test as RunSimplexInputStreamWithFileAsSink but this time output
// streaming is active as well (reads zeros only).
// NOTE: this test requires user interaction and is not designed to run as an
// automatized test on bots.
TEST_F(AudioAndroidTest, DISABLED_RunDuplexInputStreamWithFileAsSink) {
  AudioParameters in_params = GetDefaultInputStreamParameters();
  AudioInputStream* ais = audio_manager()->MakeAudioInputStream(
      in_params, AudioManagerBase::kDefaultDeviceId);
  EXPECT_TRUE(ais);

  PrintAudioParameters(in_params);
  fflush(stdout);

  AudioParameters out_params =
      audio_manager()->GetDefaultOutputStreamParameters();
  AudioOutputStream* aos = audio_manager()->MakeAudioOutputStream(
      out_params, std::string(), std::string());
  EXPECT_TRUE(aos);

  PrintAudioParameters(out_params);
  fflush(stdout);

  // Encode the active input parameters in the output file name:
  // out_duplex_<sample rate>_<frames per buffer>_<channels>.pcm.
  std::string file_name = base::StringPrintf("out_duplex_%d_%d_%d.pcm",
      in_params.sample_rate(), in_params.frames_per_buffer(),
      in_params.channels());

  base::WaitableEvent event(false, false);
  FileAudioSink sink(&event, in_params, file_name);

  // The output side uses the mock source which renders silence; only the
  // input side records to file.
  EXPECT_TRUE(ais->Open());
  EXPECT_TRUE(aos->Open());
  ais->Start(&sink);
  aos->Start(&io_callbacks_);
  printf(">> Speak into the microphone to record audio");
  fflush(stdout);
  // FileAudioSink signals |event| when its internal buffer has been filled.
  EXPECT_TRUE(event.TimedWait(TestTimeouts::action_max_timeout()));
  printf("\n");
  aos->Stop();
  ais->Stop();
  aos->Close();
  ais->Close();
}
| + |
// Start audio in both directions while feeding captured data into a FIFO so
// it can be read directly (in loopback) by the render side. A small extra
// delay will be added by the FIFO and an estimate of this delay will be
// printed out during the test.
// NOTE: this test requires user interaction and is not designed to run as an
// automatized test on bots.
TEST_F(AudioAndroidTest,
       DISABLED_RunSymmetricInputAndOutputStreamsInFullDuplex) {
  // Get native audio parameters for the input side.
  AudioParameters default_input_params = GetDefaultInputStreamParameters();

  // Modify the parameters so that both input and output can use the same
  // parameters by selecting 10ms as buffer size. This will also ensure that
  // the output stream will be a mono stream since mono is default for input
  // audio on Android.
  AudioParameters io_params(default_input_params.format(),
                            default_input_params.channel_layout(),
                            default_input_params.sample_rate(),
                            default_input_params.bits_per_sample(),
                            default_input_params.sample_rate() / 100);
  PrintAudioParameters(io_params);
  fflush(stdout);

  // Create input and output streams using the common audio parameters.
  AudioInputStream* ais = audio_manager()->MakeAudioInputStream(
      io_params, AudioManagerBase::kDefaultDeviceId);
  EXPECT_TRUE(ais);
  AudioOutputStream* aos = audio_manager()->MakeAudioOutputStream(
      io_params, std::string(), std::string());
  EXPECT_TRUE(aos);

  // One object serves as both the capture sink and the render source.
  FullDuplexAudioSinkSource full_duplex(io_params);

  // Start a full duplex audio session and print out estimates of the extra
  // delay we should expect from the FIFO. If real-time delay measurements are
  // performed, the result should be reduced by this extra delay since it is
  // something that has been added by the test.
  EXPECT_TRUE(ais->Open());
  EXPECT_TRUE(aos->Open());
  ais->Start(&full_duplex);
  aos->Start(&full_duplex);
  printf("HINT: an estimate of the extra FIFO delay will be updated once per "
         "second during this test.\n");
  printf(">> Speak into the mic and listen to the audio in loopback...\n");
  fflush(stdout);
  // Run the loopback session for a fixed 20 seconds.
  base::PlatformThread::Sleep(base::TimeDelta::FromSeconds(20));
  printf("\n");
  aos->Stop();
  ais->Stop();
  aos->Close();
  ais->Close();
}
| + |
| +} // namespace media |