| Index: media/audio/android/audio_android_unittest.cc
|
| diff --git a/media/audio/android/audio_android_unittest.cc b/media/audio/android/audio_android_unittest.cc
|
| new file mode 100644
|
| index 0000000000000000000000000000000000000000..4bc6be8b25344bbdda621babe003e615821f6701
|
| --- /dev/null
|
| +++ b/media/audio/android/audio_android_unittest.cc
|
| @@ -0,0 +1,852 @@
|
| +// Copyright (c) 2012 The Chromium Authors. All rights reserved.
|
| +// Use of this source code is governed by a BSD-style license that can be
|
| +// found in the LICENSE file.
|
| +
|
| +#include "base/basictypes.h"
|
| +#include "base/file_util.h"
|
| +#include "base/memory/scoped_ptr.h"
|
| +#include "base/message_loop/message_loop.h"
|
| +#include "base/path_service.h"
|
| +#include "base/strings/stringprintf.h"
|
| +#include "base/synchronization/lock.h"
|
| +#include "base/synchronization/waitable_event.h"
|
| +#include "base/test/test_timeouts.h"
|
| +#include "base/time/time.h"
|
| +#include "build/build_config.h"
|
| +#include "media/audio/android/audio_manager_android.h"
|
| +#include "media/audio/audio_io.h"
|
| +#include "media/audio/audio_manager_base.h"
|
| +#include "media/base/decoder_buffer.h"
|
| +#include "media/base/seekable_buffer.h"
|
| +#include "media/base/test_data_util.h"
|
| +#include "testing/gtest/include/gtest/gtest.h"
|
| +
|
| +namespace media {
|
| +
|
| +static const char kSpeechFile_16b_s_48k[] = "speech_16b_stereo_48kHz.raw";
|
| +static const char kSpeechFile_16b_m_48k[] = "speech_16b_mono_48kHz.raw";
|
| +static const char kSpeechFile_16b_s_44k[] = "speech_16b_stereo_44kHz.raw";
|
| +static const char kSpeechFile_16b_m_44k[] = "speech_16b_mono_44kHz.raw";
|
| +
|
| +static const int kBitsPerSample = 16;
|
| +
|
| +// Counts input and output audio callbacks and errors. A callback limit can be
|
| +// set for each direction; when the limit is reached, the associated waitable
|
| +// event is signaled so that the test can stop waiting.
|
| +class MockAudioInputOutputCallbacks
|
| + : public AudioInputStream::AudioInputCallback,
|
| + public AudioOutputStream::AudioSourceCallback {
|
| + public:
|
| + MockAudioInputOutputCallbacks()
|
| + : input_callbacks_(0),
|
| + output_callbacks_(0),
|
| + input_callback_limit_(-1),
|
| + output_callback_limit_(-1),
|
| + input_errors_(0),
|
| + output_errors_(0) {}
|
| + virtual ~MockAudioInputOutputCallbacks() {}
|
| +
|
| + // Implementation of AudioInputCallback.
|
| + virtual void OnData(AudioInputStream* stream, const uint8* src,
|
| + uint32 size, uint32 hardware_delay_bytes,
|
| + double volume) OVERRIDE {
|
| + // int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
|
| + // DVLOG(1) << "##" << thread_id;
|
| +
|
| + if (input_callbacks_ == 0)
|
| + input_start_time_ = base::TimeTicks::Now();
|
| +
|
| + input_callbacks_++;
|
| +
|
| + if (input_callback_limit_ > 0 &&
|
| + input_callbacks_ == input_callback_limit_) {
|
| + input_end_time_ = base::TimeTicks::Now();
|
| + input_event_->Signal();
|
| + }
|
| + }
|
| + virtual void OnClose(AudioInputStream* stream) OVERRIDE {}
|
| + virtual void OnError(AudioInputStream* stream) OVERRIDE {
|
| + input_errors_++;
|
| + }
|
| +
|
| + // Implementation of AudioOutputStream::AudioSourceCallback.
|
| + virtual int OnMoreData(AudioBus* dest,
|
| + AudioBuffersState buffers_state) OVERRIDE {
|
| + // DVLOG(1) << "--- OnMoreData ---";
|
| + if (output_callbacks_ == 0)
|
| + output_start_time_ = base::TimeTicks::Now();
|
| +
|
| + output_callbacks_++;
|
| +
|
| + if (output_callback_limit_ > 0 &&
|
| + output_callbacks_ == output_callback_limit_) {
|
| + output_end_time_ = base::TimeTicks::Now();
|
| + output_event_->Signal();
|
| + }
|
| +
|
| + dest->Zero();
|
| + return dest->frames();
|
| + }
|
| +
|
| + virtual int OnMoreIOData(AudioBus* source,
|
| + AudioBus* dest,
|
| + AudioBuffersState buffers_state) OVERRIDE {
|
| + NOTREACHED();
|
| + return 0;
|
| + }
|
| +
|
| + virtual void OnError(AudioOutputStream* stream) OVERRIDE {
|
| + output_errors_++;
|
| + }
|
| +
|
| + int input_callbacks() { return input_callbacks_; }
|
| + void set_input_callback_limit(base::WaitableEvent* event,
|
| + int input_callback_limit) {
|
| + input_event_ = event;
|
| + input_callback_limit_ = input_callback_limit;
|
| + }
|
| + int input_errors() { return input_errors_; }
|
| + base::TimeTicks input_start_time() { return input_start_time_; }
|
| + base::TimeTicks input_end_time() { return input_end_time_; }
|
| +
|
| + int output_callbacks() { return output_callbacks_; }
|
| + void set_output_callback_limit(base::WaitableEvent* event,
|
| + int output_callback_limit) {
|
| + output_event_ = event;
|
| + output_callback_limit_ = output_callback_limit;
|
| + }
|
| + int output_errors() { return output_errors_; }
|
| + base::TimeTicks output_start_time() { return output_start_time_; }
|
| + base::TimeTicks output_end_time() { return output_end_time_; }
|
| +
|
| + private:
|
| + int input_callbacks_;
|
| + int output_callbacks_;
|
| + int input_callback_limit_;
|
| + int output_callback_limit_;
|
| + int input_errors_;
|
| + int output_errors_;
|
| + base::TimeTicks input_start_time_;
|
| + base::TimeTicks output_start_time_;
|
| + base::TimeTicks input_end_time_;
|
| + base::TimeTicks output_end_time_;
|
| + base::WaitableEvent* input_event_;
|
| + base::WaitableEvent* output_event_;
|
| +
|
| + DISALLOW_COPY_AND_ASSIGN(MockAudioInputOutputCallbacks);
|
| +};
|
| +
|
| +// Implements AudioOutputStream::AudioSourceCallback and provides audio data
|
| +// by reading from a data file.
|
| +class FileAudioSource : public AudioOutputStream::AudioSourceCallback {
|
| + public:
|
| + explicit FileAudioSource(base::WaitableEvent* event, const std::string& name)
|
| + : event_(event),
|
| + pos_(0),
|
| + previous_marker_time_(base::TimeTicks::Now()) {
|
| + // Reads a test file from media/test/data directory and stores it in
|
| + // a DecoderBuffer.
|
| + file_ = ReadTestDataFile(name);
|
| +
|
| + // Log the name of the file which is used as input for this test.
|
| + base::FilePath file_path = GetTestDataFilePath(name);
|
| + printf("Reading from file: %s\n", file_path.value().c_str());
|
| + fflush(stdout);
|
| + }
|
| +
|
| + virtual ~FileAudioSource() {}
|
| +
|
| + // AudioOutputStream::AudioSourceCallback implementation.
|
| +
|
| + // Use samples read from a data file and fill up the audio buffer
|
| + // provided to us in the callback.
|
| + virtual int OnMoreData(AudioBus* audio_bus,
|
| + AudioBuffersState buffers_state) OVERRIDE {
|
| + // Add a '.'-marker once every second.
|
| + const base::TimeTicks now_time = base::TimeTicks::Now();
|
| + const int diff = (now_time - previous_marker_time_).InMilliseconds();
|
| + if (diff > 1000) {
|
| + printf(".");
|
| + fflush(stdout);
|
| + previous_marker_time_ = now_time;
|
| + }
|
| +
|
| + int max_size =
|
| + audio_bus->frames() * audio_bus->channels() * kBitsPerSample / 8;
|
| +
|
| + bool stop_playing = false;
|
| +
|
| + // Adjust data size and prepare for end signal if file has ended.
|
| + if (pos_ + static_cast<int>(max_size) > file_size()) {
|
| + stop_playing = true;
|
| + max_size = file_size() - pos_;
|
| + }
|
| +
|
| + // File data is stored as interleaved 16-bit values. Copy data samples from
|
| + // the file and deinterleave to match the audio bus format.
|
| + // FromInterleaved() will zero out any unfilled frames when there is not
|
| + // sufficient data remaining in the file to fill up the complete frame.
|
| + int frames = max_size / (audio_bus->channels() * kBitsPerSample / 8);
|
| + if (max_size) {
|
| + audio_bus->FromInterleaved(
|
| + file_->data() + pos_, frames, kBitsPerSample / 8);
|
| + pos_ += max_size;
|
| + }
|
| +
|
| + // Set event to ensure that the test can stop when the file has ended.
|
| + if (stop_playing)
|
| + event_->Signal();
|
| +
|
| + return frames;
|
| + }
|
| +
|
| + virtual int OnMoreIOData(AudioBus* source,
|
| + AudioBus* dest,
|
| + AudioBuffersState buffers_state) OVERRIDE {
|
| + NOTREACHED();
|
| + return 0;
|
| + }
|
| +
|
| + virtual void OnError(AudioOutputStream* stream) OVERRIDE {}
|
| +
|
| + int file_size() { return file_->data_size(); }
|
| +
|
| + private:
|
| + base::WaitableEvent* event_;
|
| + int pos_;
|
| + scoped_refptr<DecoderBuffer> file_;
|
| + base::TimeTicks previous_marker_time_;
|
| +
|
| + DISALLOW_COPY_AND_ASSIGN(FileAudioSource);
|
| +};
|
| +
|
| +// Implements AudioInputStream::AudioInputCallback and writes the recorded
|
| +// audio data to a local output file.
|
| +class FileAudioSink : public AudioInputStream::AudioInputCallback {
|
| + public:
|
| + explicit FileAudioSink(base::WaitableEvent* event,
|
| + const AudioParameters& params,
|
| + const std::string& file_name)
|
| + : event_(event),
|
| + params_(params),
|
| + previous_marker_time_(base::TimeTicks::Now()) {
|
| + // Allocate space for ~10 seconds of data.
|
| + const int kMaxBufferSize = 10 * params.GetBytesPerSecond();
|
| + buffer_.reset(new media::SeekableBuffer(0, kMaxBufferSize));
|
| +
|
| + // Open up the binary file which will be written to in the destructor.
|
| + base::FilePath file_path;
|
| + EXPECT_TRUE(PathService::Get(base::DIR_SOURCE_ROOT, &file_path));
|
| + file_path = file_path.AppendASCII(file_name.c_str());
|
| + binary_file_ = file_util::OpenFile(file_path, "wb");
|
| + DLOG_IF(ERROR, !binary_file_) << "Failed to open binary PCM data file.";
|
| + printf("Writing to file : %s ", file_path.value().c_str());
|
| + printf("of size %d bytes\n", buffer_->forward_capacity());
|
| + fflush(stdout);
|
| + }
|
| +
|
| + virtual ~FileAudioSink() {
|
| + int bytes_written = 0;
|
| + while (bytes_written < buffer_->forward_capacity()) {
|
| + const uint8* chunk;
|
| + int chunk_size;
|
| +
|
| + // Stop writing if no more data is available.
|
| + if (!buffer_->GetCurrentChunk(&chunk, &chunk_size))
|
| + break;
|
| +
|
| + // Write recorded data chunk to the file and prepare for next chunk.
|
| + fwrite(chunk, 1, chunk_size, binary_file_);
|
| + buffer_->Seek(chunk_size);
|
| + bytes_written += chunk_size;
|
| + }
|
| + file_util::CloseFile(binary_file_);
|
| + }
|
| +
|
| + // AudioInputStream::AudioInputCallback implementation.
|
| + virtual void OnData(AudioInputStream* stream,
|
| + const uint8* src,
|
| + uint32 size,
|
| + uint32 hardware_delay_bytes,
|
| + double volume) OVERRIDE {
|
| + // Add a '.'-marker once every second.
|
| + const base::TimeTicks now_time = base::TimeTicks::Now();
|
| + const int diff = (now_time - previous_marker_time_).InMilliseconds();
|
| + if (diff > 1000) {
|
| + printf(".");
|
| + fflush(stdout);
|
| + previous_marker_time_ = now_time;
|
| + }
|
| +
|
| + // Store the data in a temporary buffer to avoid making blocking
|
| + // fwrite() calls in the audio callback. The complete buffer will be
|
| + // written to file in the destructor.
|
| + if (!buffer_->Append(src, size))
|
| + event_->Signal();
|
| + }
|
| +
|
| + virtual void OnClose(AudioInputStream* stream) OVERRIDE {}
|
| + virtual void OnError(AudioInputStream* stream) OVERRIDE {}
|
| +
|
| + private:
|
| + base::WaitableEvent* event_;
|
| + AudioParameters params_;
|
| + scoped_ptr<media::SeekableBuffer> buffer_;
|
| + FILE* binary_file_;
|
| + base::TimeTicks previous_marker_time_;
|
| +
|
| + DISALLOW_COPY_AND_ASSIGN(FileAudioSink);
|
| +};
|
| +
|
| +// Implements AudioInputCallback and AudioSourceCallback to support full
|
| +// duplex audio where captured samples are played out in loopback after
|
| +// reading from a temporary FIFO storage.
|
| +class FullDuplexAudioSinkSource
|
| + : public AudioInputStream::AudioInputCallback,
|
| + public AudioOutputStream::AudioSourceCallback {
|
| + public:
|
| + explicit FullDuplexAudioSinkSource(const AudioParameters& params)
|
| + : params_(params),
|
| + previous_marker_time_(base::TimeTicks::Now()),
|
| + started_(false) {
|
| + // Start with a reasonably small FIFO size. It will be increased
|
| + // dynamically during the test if required.
|
| + fifo_.reset(
|
| + new media::SeekableBuffer(0, 2 * params.GetBytesPerBuffer()));
|
| + buffer_.reset(new uint8[params_.GetBytesPerBuffer()]);
|
| + }
|
| +
|
| + virtual ~FullDuplexAudioSinkSource() {}
|
| +
|
| + // AudioInputStream::AudioInputCallback implementation
|
| + virtual void OnData(AudioInputStream* stream, const uint8* src,
|
| + uint32 size, uint32 hardware_delay_bytes,
|
| + double volume) OVERRIDE {
|
| + // Add a '.'-marker once every second.
|
| + const base::TimeTicks now_time = base::TimeTicks::Now();
|
| + const int diff = (now_time - previous_marker_time_).InMilliseconds();
|
| +
|
| + base::AutoLock lock(lock_);
|
| + if (diff > 1000) {
|
| + started_ = true;
|
| + previous_marker_time_ = now_time;
|
| +
|
| + // Print out the extra delay added by the FIFO. This is a best-effort
|
| + // estimate; we might be +/- 10 ms off here.
|
| + int extra_fifo_delay = static_cast<int>(
|
| + BytesToMilliseconds(fifo_->forward_bytes() + size));
|
| + printf("%d ", extra_fio_delay);
|
| + fflush(stdout);
|
| + }
|
| +
|
| + // We add an initial delay of ~1 second before loopback starts to ensure
|
| + // a stable callback sequence and to avoid initial bursts which might add
|
| + // to the extra FIFO delay.
|
| + if (!started_)
|
| + return;
|
| +
|
| + if (!fifo_->Append(src, size)) {
|
| + fifo_->set_forward_capacity(2 * fifo_->forward_capacity());
|
| + }
|
| + }
|
| +
|
| + virtual void OnClose(AudioInputStream* stream) OVERRIDE {}
|
| + virtual void OnError(AudioInputStream* stream) OVERRIDE {}
|
| +
|
| + // AudioOutputStream::AudioSourceCallback implementation
|
| + virtual int OnMoreData(AudioBus* dest,
|
| + AudioBuffersState buffers_state) OVERRIDE {
|
| + const int size_in_bytes =
|
| + (kBitsPerSample / 8) * dest->frames() * dest->channels();
|
| + EXPECT_EQ(size_in_bytes, params_.GetBytesPerBuffer());
|
| +
|
| + base::AutoLock lock(lock_);
|
| +
|
| + // We add an initial delay of ~1 second before loopback starts to ensure
|
| + // a stable callback sequence and to avoid initial bursts which might add
|
| + // to the extra FIFO delay.
|
| + if (!started_) {
|
| + dest->Zero();
|
| + return dest->frames();
|
| + }
|
| +
|
| + // Fill up destination with zeros if the FIFO does not contain enough
|
| + // data to fulfill the request.
|
| + if (fifo_->forward_bytes() < size_in_bytes) {
|
| + dest->Zero();
|
| + } else {
|
| + fifo_->Read(buffer_.get(), size_in_bytes);
|
| + dest->FromInterleaved(
|
| + buffer_.get(), dest->frames(), kBitsPerSample / 8);
|
| + }
|
| +
|
| + return dest->frames();
|
| + }
|
| + virtual int OnMoreIOData(AudioBus* source,
|
| + AudioBus* dest,
|
| + AudioBuffersState buffers_state) OVERRIDE {
|
| + NOTREACHED();
|
| + return 0;
|
| + }
|
| + virtual void OnError(AudioOutputStream* stream) OVERRIDE {}
|
| +
|
| + private:
|
| + // Converts from bytes to milliseconds given number of bytes and existing
|
| + // audio parameters.
|
| + double BytesToMilliseconds(int bytes) const {
|
| + const int frames = bytes / params_.GetBytesPerFrame();
|
| + return (base::TimeDelta::FromMicroseconds(
|
| + frames * base::Time::kMicrosecondsPerSecond /
|
| + static_cast<float>(params_.sample_rate()))).InMillisecondsF();
|
| + }
|
| +
|
| + AudioParameters params_;
|
| + base::TimeTicks previous_marker_time_;
|
| + base::Lock lock_;
|
| + scoped_ptr<media::SeekableBuffer> fifo_;
|
| + scoped_ptr<uint8[]> buffer_;
|
| + bool started_;
|
| +
|
| + DISALLOW_COPY_AND_ASSIGN(FullDuplexAudioSinkSource);
|
| +};
|
| +
|
| +// Test fixture class.
|
| +class AudioAndroidTest : public testing::Test {
|
| + public:
|
| + AudioAndroidTest()
|
| + : audio_manager_(AudioManager::Create()) {}
|
| +
|
| + virtual ~AudioAndroidTest() {}
|
| +
|
| + AudioManager* audio_manager() { return audio_manager_.get(); }
|
| +
|
| + // Convenience method which verifies that at least one valid input and one
|
| + // valid output device can be found. Tests are skipped when this returns
|
| + // false, which is typically the case on the build bots.
|
| + bool CanRunAudioTests() {
|
| + bool input = audio_manager()->HasAudioInputDevices();
|
| + bool output = audio_manager()->HasAudioOutputDevices();
|
| + LOG_IF(WARNING, !input) << "No input device detected.";
|
| + LOG_IF(WARNING, !output) << "No output device detected.";
|
| + return input && output;
|
| + }
|
| +
|
| + // Converts AudioParameters::Format enumerator to readable string.
|
| + std::string FormatToString(AudioParameters::Format format) {
|
| + if (format == AudioParameters::AUDIO_PCM_LINEAR)
|
| + return std::string("AUDIO_PCM_LINEAR");
|
| + else if (format == AudioParameters::AUDIO_PCM_LOW_LATENCY)
|
| + return std::string("AUDIO_PCM_LOW_LATENCY");
|
| + else if (format == AudioParameters::AUDIO_FAKE)
|
| + return std::string("AUDIO_FAKE");
|
| + else if (format == AudioParameters::AUDIO_LAST_FORMAT)
|
| + return std::string("AUDIO_LAST_FORMAT");
|
| + else
|
| + return std::string();
|
| + }
|
| +
|
| + // Converts ChannelLayout enumerator to readable string. Does not include
|
| + // multi-channel cases since these layouts are not supported on Android.
|
| + std::string ChannelLayoutToString(ChannelLayout channel_layout) {
|
| + if (channel_layout == CHANNEL_LAYOUT_NONE)
|
| + return std::string("CHANNEL_LAYOUT_NONE");
|
| + else if (channel_layout == CHANNEL_LAYOUT_UNSUPPORTED)
|
| + return std::string("CHANNEL_LAYOUT_UNSUPPORTED");
|
| + else if (channel_layout == CHANNEL_LAYOUT_MONO)
|
| + return std::string("CHANNEL_LAYOUT_MONO");
|
| + else if (channel_layout == CHANNEL_LAYOUT_STEREO)
|
| + return std::string("CHANNEL_LAYOUT_STEREO");
|
| + else
|
| + return std::string("CHANNEL_LAYOUT_UNSUPPORTED");
|
| + }
|
| +
|
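| + // Logs the given audio parameters to stdout.
|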
| + void PrintAudioParameters(AudioParameters params) {
|
| + printf("format : %s\n", FormatToString(params.format()).c_str());
|
| + printf("channel_layout : %s\n",
|
| + ChannelLayoutToString(params.channel_layout()).c_str());
|
| + printf("sample_rate : %d\n", params.sample_rate());
|
| + printf("bits_per_sample : %d\n", params.bits_per_sample());
|
| + printf("frames_per_buffer: %d\n", params.frames_per_buffer());
|
| + printf("channels : %d\n", params.channels());
|
| + printf("bytes per buffer : %d\n", params.GetBytesPerBuffer());
|
| + printf("bytes per second : %d\n", params.GetBytesPerSecond());
|
| + printf("bytes per frame : %d\n", params.GetBytesPerFrame());
|
| + printf("frame size in ms : %.2f\n", TimeBetweenCallbacks(params));
|
| + }
|
| +
|
| + AudioParameters GetDefaultInputStreamParameters() {
|
| + return audio_manager()->GetInputStreamParameters(
|
| + AudioManagerBase::kDefaultDeviceId);
|
| + }
|
| +
|
| + AudioParameters GetDefaultOutputStreamParameters() {
|
| + return audio_manager()->GetDefaultOutputStreamParameters();
|
| + }
|
| +
|
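| + // Returns the expected time in milliseconds between two consecutive audio
|
| + // callbacks given the buffer size and sample rate in |params|.
|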
| + double TimeBetweenCallbacks(AudioParameters params) const {
|
| + return (base::TimeDelta::FromMicroseconds(
|
| + params.frames_per_buffer() * base::Time::kMicrosecondsPerSecond /
|
| + static_cast<float>(params.sample_rate()))).InMillisecondsF();
|
| + }
|
| +
|
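| + // Convenience macro which opens and starts the given stream using the shared
|
| + // |io_callbacks_| member, waits until the local |event| is signaled (or the
|
| + // test times out) and finally stops and closes the stream.
|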
| + #define START_STREAM_AND_WAIT_FOR_EVENT(stream) \
|
| + EXPECT_TRUE(stream->Open()); \
|
| + stream->Start(&io_callbacks_); \
|
| + EXPECT_TRUE(event.TimedWait(TestTimeouts::action_timeout())); \
|
| + stream->Stop(); \
|
| + stream->Close()
|
| +
|
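| + // Starts an input stream using the given parameters, records roughly one
|
| + // second of audio and verifies that the number of callbacks and the average
|
| + // time between them match what the parameters predict.
|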
| + void StartInputStreamCallbacks(const AudioParameters& params) {
|
| + double time_between_callbacks_ms = TimeBetweenCallbacks(params);
|
| + const int num_callbacks = (1000.0 / time_between_callbacks_ms);
|
| +
|
| + base::WaitableEvent event(false, false);
|
| + io_callbacks_.set_input_callback_limit(&event, num_callbacks);
|
| +
|
| + AudioInputStream* ais = audio_manager()->MakeAudioInputStream(
|
| + params, AudioManagerBase::kDefaultDeviceId);
|
| + EXPECT_TRUE(ais);
|
| + START_STREAM_AND_WAIT_FOR_EVENT(ais);
|
| +
|
| + EXPECT_GE(io_callbacks_.input_callbacks(), num_callbacks - 1);
|
| + EXPECT_LE(io_callbacks_.input_callbacks(), num_callbacks + 1);
|
| + EXPECT_EQ(io_callbacks_.input_errors(), 0);
|
| +
|
| + double actual_time_between_callbacks_ms = (
|
| + (io_callbacks_.input_end_time() - io_callbacks_.input_start_time()) /
|
| + (io_callbacks_.input_callbacks() - 1)).InMillisecondsF();
|
| + printf("time between callbacks: %.2fms\n", time_between_callbacks_ms);
|
| + printf("actual time between callbacks: %.2fms\n",
|
| + actual_time_between_callbacks_ms);
|
| + EXPECT_GE(actual_time_between_callbacks_ms,
|
| + 0.75 * time_between_callbacks_ms);
|
| + EXPECT_LE(actual_time_between_callbacks_ms,
|
| + 1.25 * time_between_callbacks_ms);
|
| + }
|
| +
|
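| + // Same as StartInputStreamCallbacks() but exercises the output path using
|
| + // the default output device.
|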
| + void StartOutputStreamCallbacks(const AudioParameters& params) {
|
| + double time_between_callbacks_ms = TimeBetweenCallbacks(params);
|
| + const int num_callbacks = (1000.0 / time_between_callbacks_ms);
|
| +
|
| + base::WaitableEvent event(false, false);
|
| + io_callbacks_.set_output_callback_limit(&event, num_callbacks);
|
| + AudioOutputStream* aos = audio_manager()->MakeAudioOutputStream(
|
| + params, std::string());
|
| + EXPECT_TRUE(aos);
|
| + START_STREAM_AND_WAIT_FOR_EVENT(aos);
|
| +
|
| + EXPECT_GE(io_callbacks_.output_callbacks(), num_callbacks - 1);
|
| + EXPECT_LE(io_callbacks_.output_callbacks(), num_callbacks + 1);
|
| + EXPECT_EQ(io_callbacks_.output_errors(), 0);
|
| +
|
| + double actual_time_between_callbacks_ms = (
|
| + (io_callbacks_.output_end_time() - io_callbacks_.output_start_time()) /
|
| + (io_callbacks_.output_callbacks() - 1)).InMillisecondsF();
|
| + printf("time between callbacks: %.2fms\n", time_between_callbacks_ms);
|
| + printf("actual time between callbacks: %.2fms\n",
|
| + actual_time_between_callbacks_ms);
|
| + EXPECT_GE(actual_time_between_callbacks_ms,
|
| + 0.75 * time_between_callbacks_ms);
|
| + EXPECT_LE(actual_time_between_callbacks_ms,
|
| + 1.25 * time_between_callbacks_ms);
|
| + }
|
| +
|
| + #undef START_STREAM_AND_WAIT_FOR_EVENT
|
| +
|
| + protected:
|
| + base::MessageLoopForUI message_loop_;
|
| + scoped_ptr<AudioManager> audio_manager_;
|
| + MockAudioInputOutputCallbacks io_callbacks_;
|
| +
|
| + DISALLOW_COPY_AND_ASSIGN(AudioAndroidTest);
|
| +};
|
| +
|
| +// Get the default audio input parameters and log the result.
|
| +TEST_F(AudioAndroidTest, GetInputStreamParameters) {
|
| + if (!CanRunAudioTests())
|
| + return;
|
| + AudioParameters params = GetDefaultInputStreamParameters();
|
| + EXPECT_TRUE(params.IsValid());
|
| + PrintAudioParameters(params);
|
| +}
|
| +
|
| +// Get the default audio output parameters and log the result.
|
| +TEST_F(AudioAndroidTest, GetDefaultOutputStreamParameters) {
|
| + if (!CanRunAudioTests())
|
| + return;
|
| + AudioParameters params = GetDefaultOutputStreamParameters();
|
| + EXPECT_TRUE(params.IsValid());
|
| + PrintAudioParameters(params);
|
| +}
|
| +
|
| +// Check if low-latency output is supported and log the result as output.
|
| +TEST_F(AudioAndroidTest, IsAudioLowLatencySupported) {
|
| + if (!CanRunAudioTests())
|
| + return;
|
| + AudioManagerAndroid* manager =
|
| + static_cast<AudioManagerAndroid*>(audio_manager());
|
| + bool low_latency = manager->IsAudioLowLatencySupported();
|
| + low_latency ? printf("Low latency output is supported\n") :
|
| + printf("Low latency output is *not* supported\n");
|
| +}
|
| +
|
| +// Ensure that a default input stream can be created and closed.
|
| +TEST_F(AudioAndroidTest, CreateAndCloseInputStream) {
|
| + if (!CanRunAudioTests())
|
| + return;
|
| + AudioParameters params = GetDefaultInputStreamParameters();
|
| + AudioInputStream* ais = audio_manager()->MakeAudioInputStream(
|
| + params, AudioManagerBase::kDefaultDeviceId);
|
| + EXPECT_TRUE(ais);
|
| + ais->Close();
|
| +}
|
| +
|
| +// Ensure that a default output stream can be created and closed.
|
| +// TODO(henrika): should we also verify that this API changes the audio mode
|
| +// to communication mode, and calls RegisterHeadsetReceiver, the first time
|
| +// it is called?
|
| +TEST_F(AudioAndroidTest, CreateAndCloseOutputStream) {
|
| + if (!CanRunAudioTests())
|
| + return;
|
| + AudioParameters params = GetDefaultOutputStreamParameters();
|
| + AudioOutputStream* aos = audio_manager()->MakeAudioOutputStream(
|
| + params, std::string());
|
| + EXPECT_TRUE(aos);
|
| + aos->Close();
|
| +}
|
| +
|
| +// Ensure that a default input stream can be opened and closed.
|
| +TEST_F(AudioAndroidTest, OpenAndCloseInputStream) {
|
| + if (!CanRunAudioTests())
|
| + return;
|
| + AudioParameters params = GetDefaultInputStreamParameters();
|
| + AudioInputStream* ais = audio_manager()->MakeAudioInputStream(
|
| + params, AudioManagerBase::kDefaultDeviceId);
|
| + EXPECT_TRUE(ais);
|
| + EXPECT_TRUE(ais->Open());
|
| + ais->Close();
|
| +}
|
| +
|
| +// Ensure that a default output stream can be opened and closed.
|
| +TEST_F(AudioAndroidTest, OpenAndCloseOutputStream) {
|
| + if (!CanRunAudioTests())
|
| + return;
|
| + AudioParameters params = GetDefaultOutputStreamParameters();
|
| + AudioOutputStream* aos = audio_manager()->MakeAudioOutputStream(
|
| + params, std::string());
|
| + EXPECT_TRUE(aos);
|
| + EXPECT_TRUE(aos->Open());
|
| + aos->Close();
|
| +}
|
| +
|
| +// Start input streaming using default input parameters and ensure that the
|
| +// callback sequence is sane.
|
| +TEST_F(AudioAndroidTest, StartInputStreamCallbacks) {
|
| + if (!CanRunAudioTests())
|
| + return;
|
| + AudioParameters params = GetDefaultInputStreamParameters();
|
| + StartInputStreamCallbacks(params);
|
| +}
|
| +
|
| +// Start input streaming using non-default input parameters and ensure that the
|
| +// callback sequence is sane. The only change we make in this test is to select
|
| +// a 10ms buffer size instead of the default size.
|
| +// TODO(henrika): possibly add support for more variations.
|
| +TEST_F(AudioAndroidTest, StartInputStreamCallbacksNonDefaultParameters) {
|
| + if (!CanRunAudioTests())
|
| + return;
|
| + AudioParameters native_params = GetDefaultInputStreamParameters();
|
| + AudioParameters params(native_params.format(),
|
| + native_params.channel_layout(),
|
| + native_params.sample_rate(),
|
| + native_params.bits_per_sample(),
|
| + native_params.sample_rate() / 100);
|
| + StartInputStreamCallbacks(params);
|
| +}
|
| +
|
| +// Start output streaming using default output parameters and ensure that the
|
| +// callback sequence is sane.
|
| +TEST_F(AudioAndroidTest, StartOutputStreamCallbacks) {
|
| + if (!CanRunAudioTests())
|
| + return;
|
| + AudioParameters params = GetDefaultOutputStreamParameters();
|
| + StartOutputStreamCallbacks(params);
|
| +}
|
| +
|
| +// Start output streaming using non-default output parameters and ensure that
|
| +// the callback sequence is sane. The only change we make in this test is to
|
| +// select a 10ms buffer size instead of the default size and to open up the
|
| +// device in mono.
|
| +// TODO(henrika): possibly add support for more variations.
|
| +TEST_F(AudioAndroidTest, StartOutputStreamCallbacksNonDefaultParameters) {
|
| + if (!CanRunAudioTests())
|
| + return;
|
| + AudioParameters native_params = GetDefaultOutputStreamParameters();
|
| + AudioParameters params(native_params.format(),
|
| + CHANNEL_LAYOUT_MONO,
|
| + native_params.sample_rate(),
|
| + native_params.bits_per_sample(),
|
| + native_params.sample_rate() / 100);
|
| + StartOutputStreamCallbacks(params);
|
| +}
|
| +
|
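| +// Play out a PCM speech file that matches the default output parameters and
|
| +// verify manually that the audio sounds correct.
|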
| +TEST_F(AudioAndroidTest, RunOutputStreamWithFileAsSource) {
|
| + if (!CanRunAudioTests())
|
| + return;
|
| +
|
| + AudioParameters params = GetDefaultOutputStreamParameters();
|
| + AudioOutputStream* aos = audio_manager()->MakeAudioOutputStream(
|
| + params, std::string());
|
| + EXPECT_TRUE(aos);
|
| +
|
| + PrintAudioParameters(params);
|
| + fflush(stdout);
|
| +
|
| + std::string file_name;
|
| + if (params.sample_rate() == 48000 && params.channels() == 2) {
|
| + file_name = kSpeechFile_16b_s_48k;
|
| + } else if (params.sample_rate() == 48000 && params.channels() == 1) {
|
| + file_name = kSpeechFile_16b_m_48k;
|
| + } else if (params.sample_rate() == 44100 && params.channels() == 2) {
|
| + file_name = kSpeechFile_16b_s_44k;
|
| + } else if (params.sample_rate() == 44100 && params.channels() == 1) {
|
| + file_name = kSpeechFile_16b_m_44k;
|
| + } else {
|
| + FAIL() << "This test supports 44.1kHz and 48kHz mono/stereo only.";
|
| + return;
|
| + }
|
| +
|
| + base::WaitableEvent event(false, false);
|
| + FileAudioSource source(&event, file_name);
|
| +
|
| + EXPECT_TRUE(aos->Open());
|
| + aos->SetVolume(1.0);
|
| + aos->Start(&source);
|
| + printf(">> Verify that file is played out correctly");
|
| + fflush(stdout);
|
| + EXPECT_TRUE(event.TimedWait(TestTimeouts::action_max_timeout()));
|
| + printf("\n");
|
| + aos->Stop();
|
| + aos->Close();
|
| +}
|
| +
|
| +// Start input streaming and run it for ten seconds while recording to a
|
| +// local audio file.
|
| +TEST_F(AudioAndroidTest, RunSimplexInputStreamWithFileAsSink) {
|
| + if (!CanRunAudioTests())
|
| + return;
|
| +
|
| + AudioParameters params = GetDefaultInputStreamParameters();
|
| + AudioInputStream* ais = audio_manager()->MakeAudioInputStream(
|
| + params, AudioManagerBase::kDefaultDeviceId);
|
| + EXPECT_TRUE(ais);
|
| +
|
| + PrintAudioParameters(params);
|
| + fflush(stdout);
|
| +
|
| + std::string file_name = base::StringPrintf("out_simplex_%d_%d_%d.pcm",
|
| + params.sample_rate(), params.frames_per_buffer(), params.channels());
|
| +
|
| + base::WaitableEvent event(false, false);
|
| + FileAudioSink sink(&event, params, file_name);
|
| +
|
| + EXPECT_TRUE(ais->Open());
|
| + ais->Start(&sink);
|
| + printf(">> Speak into the microphone to record audio");
|
| + fflush(stdout);
|
| + EXPECT_TRUE(event.TimedWait(TestTimeouts::action_max_timeout()));
|
| + printf("\n");
|
| + ais->Stop();
|
| + ais->Close();
|
| +}
|
| +
|
| +// Same test as RunSimplexInputStreamWithFileAsSink but this time output
|
| +// streaming is active as well (reads zeros only).
|
| +TEST_F(AudioAndroidTest, RunDuplexInputStreamWithFileAsSink) {
|
| + if (!CanRunAudioTests())
|
| + return;
|
| +
|
| + AudioParameters in_params = GetDefaultInputStreamParameters();
|
| + AudioInputStream* ais = audio_manager()->MakeAudioInputStream(
|
| + in_params, AudioManagerBase::kDefaultDeviceId);
|
| + EXPECT_TRUE(ais);
|
| +
|
| + PrintAudioParameters(in_params);
|
| + fflush(stdout);
|
| +
|
| + AudioParameters out_params =
|
| + audio_manager()->GetDefaultOutputStreamParameters();
|
| + AudioOutputStream* aos = audio_manager()->MakeAudioOutputStream(
|
| + out_params, std::string());
|
| + EXPECT_TRUE(aos);
|
| +
|
| + PrintAudioParameters(out_params);
|
| + fflush(stdout);
|
| +
|
| + std::string file_name = base::StringPrintf("out_duplex_%d_%d_%d.pcm",
|
| + in_params.sample_rate(), in_params.frames_per_buffer(),
|
| + in_params.channels());
|
| +
|
| + base::WaitableEvent event(false, false);
|
| + FileAudioSink sink(&event, in_params, file_name);
|
| +
|
| + EXPECT_TRUE(ais->Open());
|
| + EXPECT_TRUE(aos->Open());
|
| + ais->Start(&sink);
|
| + aos->Start(&io_callbacks_);
|
| + printf(">> Speak into the microphone to record audio");
|
| + fflush(stdout);
|
| + EXPECT_TRUE(event.TimedWait(TestTimeouts::action_max_timeout()));
|
| + printf("\n");
|
| + aos->Stop();
|
| + ais->Stop();
|
| + aos->Close();
|
| + ais->Close();
|
| +}
|
| +
|
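| +// Run input and output streams in full duplex using a common set of 10ms
|
| +// audio parameters; captured audio is played back in loopback for 30 seconds
|
| +// while estimates of the extra FIFO delay are printed to stdout.
|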
| +TEST_F(AudioAndroidTest, RunInputAndOutputStreamsInFullDuplex) {
|
| + if (!CanRunAudioTests())
|
| + return;
|
| +
|
| + // Get native audio parameters for the input side.
|
| + AudioParameters default_input_params = GetDefaultInputStreamParameters();
|
| +
|
| + // Modify the parameters so that both input and output can use the same
|
| + // parameters by selecting 10ms as buffer size. This will also ensure that
|
| + // the output stream will be a mono stream since mono is default for input
|
| + // audio on Android.
|
| + AudioParameters io_params(default_input_params.format(),
|
| + default_input_params.channel_layout(),
|
| + default_input_params.sample_rate(),
|
| + default_input_params.bits_per_sample(),
|
| + default_input_params.sample_rate() / 100);
|
| +
|
| + PrintAudioParameters(io_params);
|
| + fflush(stdout);
|
| +
|
| + // Create input and output streams using the common audio parameters.
|
| + AudioInputStream* ais = audio_manager()->MakeAudioInputStream(
|
| + io_params, AudioManagerBase::kDefaultDeviceId);
|
| + EXPECT_TRUE(ais);
|
| + AudioOutputStream* aos = audio_manager()->MakeAudioOutputStream(
|
| + io_params, std::string());
|
| + EXPECT_TRUE(aos);
|
| +
|
| + FullDuplexAudioSinkSource full_duplex(io_params);
|
| +
|
| + // Start a full duplex audio session and print out estimates of the extra
|
| + // delay we should expect from the FIFO. If real-time delay measurements are
|
| + // performed, the result should be reduced by this extra delay since it is
|
| + // something that has been added by the test.
|
| + EXPECT_TRUE(ais->Open());
|
| + EXPECT_TRUE(aos->Open());
|
| + ais->Start(&full_duplex);
|
| + aos->Start(&full_duplex);
|
| + printf(">> Speak into the microphone and listen to the audio in loopback ");
|
| + fflush(stdout);
|
| + base::PlatformThread::Sleep(base::TimeDelta::FromSeconds(30));
|
| + printf("\n");
|
| + aos->Stop();
|
| + ais->Stop();
|
| + aos->Close();
|
| + ais->Close();
|
| +}
|
| +
|
| +} // namespace media
|
|
|