| Index: content/renderer/media/speech_recognition_audio_source_provider_unittest.cc
|
| diff --git a/content/renderer/media/speech_recognition_audio_source_provider_unittest.cc b/content/renderer/media/speech_recognition_audio_source_provider_unittest.cc
|
| new file mode 100644
|
| index 0000000000000000000000000000000000000000..b027b6a8bbad0cc9e38c87b1013192d8a7d2c035
|
| --- /dev/null
|
| +++ b/content/renderer/media/speech_recognition_audio_source_provider_unittest.cc
|
| @@ -0,0 +1,422 @@
|
| +// Copyright 2014 The Chromium Authors. All rights reserved.
|
| +// Use of this source code is governed by a BSD-style license that can be
|
| +// found in the LICENSE file.
|
| +
|
| +#include "content/renderer/media/speech_recognition_audio_source_provider.h"
|
| +
|
| +#include "base/strings/utf_string_conversions.h"
|
| +#include "content/renderer/media/mock_media_constraint_factory.h"
|
| +#include "content/renderer/media/webrtc/webrtc_local_audio_track_adapter.h"
|
| +#include "content/renderer/media/webrtc_local_audio_track.h"
|
| +#include "media/audio/audio_parameters.h"
|
| +#include "media/base/audio_bus.h"
|
| +#include "testing/gmock/include/gmock/gmock.h"
|
| +#include "testing/gtest/include/gtest/gtest.h"
|
| +#include "third_party/WebKit/public/platform/WebMediaStreamTrack.h"
|
| +
|
| +namespace {
|
| +
|
| +// Input audio format.
|
| +static const media::AudioParameters::Format kInputFormat =
|
| + media::AudioParameters::AUDIO_PCM_LOW_LATENCY;
|
| +const media::ChannelLayout kInputChannelLayout = media::CHANNEL_LAYOUT_MONO;
|
| +const int kInputChannels = 1;
|
| +const int kInputSampleRate = 44100;
|
| +const int kInputBitsPerSample = 16;
|
| +const int kInputFramesPerBuffer = 441;
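|
| +// Note: 441 frames at 44.1 kHz is 10 ms of audio per input buffer.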
|
| +
|
| +// Output audio format.
|
| +const media::AudioParameters::Format kOutputFormat =
|
| + media::AudioParameters::AUDIO_PCM_LOW_LATENCY;
|
| +const media::ChannelLayout kOutputChannelLayout = media::CHANNEL_LAYOUT_STEREO;
|
| +const int kOutputChannels = 2;
|
| +const int kOutputSampleRate = 16000;
|
| +const int kOutputBitsPerSample = 16;
|
| +const int kOutputFramesPerBuffer = 1600;
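|
| +// Note: 1600 frames at 16 kHz is 100 ms of audio per output buffer.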
|
| +
|
| +// Minimum number of input buffers that triggers one SyncSocket transfer.
|
| +const size_t kBuffersPerNotification =
|
| + (kOutputFramesPerBuffer * kInputSampleRate) /
|
| + (kInputFramesPerBuffer * kOutputSampleRate);
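|
| +// With the parameters above this evaluates to
|
| +// (1600 * 44100) / (441 * 16000) = 10 input buffers per notification.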
|
| +
|
| +// Length of the source audio data, in samples (frames * channels).
|
| +const size_t kSourceDataLength = kInputFramesPerBuffer * kInputChannels;
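|
| +// With mono input this is 441 * 1 = 441 samples, i.e. one input buffer.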
|
| +
|
| +} // namespace
|
| +
|
| +////////////////////////////////////////////////////////////////////////////////
|
| +
|
| +namespace content {
|
| +
|
| +// Mocked out sockets used for Send/Receive.
|
| +// Data is written and read from a shared buffer used as a FIFO and there is
|
| +// no blocking. |OnSendCB| is used to trigger a |Receive| on the other socket.
|
| +class MockSyncSocket : public base::SyncSocket {
|
| + public:
|
| + // This allows for 2 requests in queue between the |MockSyncSocket|s.
|
| + static const int kSharedBufferSize = 8;
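|
| + // Each request is a single uint32 buffer index (4 bytes), so the buffer
|
| + // holds at most two pending requests.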
|
| + // Buffer shared between two |MockSyncSocket|s. Allocated on the heap.
|
| + struct SharedBuffer {
|
| + SharedBuffer() : start(0), length(0) {}
|
| +
|
| + uint8 data[kSharedBufferSize];
|
| + size_t start;
|
| + size_t length;
|
| + };
|
| +
|
| + // Callback used for pairing an A.Send() with B.Receive() without blocking.
|
| + typedef base::Callback<void()> OnSendCB;
|
| +
|
| + explicit MockSyncSocket(SharedBuffer* shared_buffer);
|
| + MockSyncSocket(SharedBuffer* shared_buffer, const OnSendCB& on_send_cb);
|
| +
|
| + virtual size_t Send(const void* buffer, size_t length) OVERRIDE;
|
| + virtual size_t Receive(void* buffer, size_t length) OVERRIDE;
|
| +
|
| + // When |in_failure_mode_| == true, the socket fails to send.
|
| + void SetFailureMode(bool in_failure_mode) {
|
| + in_failure_mode_ = in_failure_mode;
|
| + }
|
| +
|
| + private:
|
| + SharedBuffer* buffer_;
|
| + const OnSendCB on_send_cb_;
|
| + bool in_failure_mode_;
|
| +};
|
| +
|
| +MockSyncSocket::MockSyncSocket(SharedBuffer* buffer)
|
| + : buffer_(buffer), in_failure_mode_(false) {}
|
| +
|
| +MockSyncSocket::MockSyncSocket(SharedBuffer* buffer, const OnSendCB& on_send_cb)
|
| + : buffer_(buffer), on_send_cb_(on_send_cb), in_failure_mode_(false) {}
|
| +
|
| +size_t MockSyncSocket::Send(const void* buffer, size_t length) {
|
| + if (in_failure_mode_)
|
| + return 0;
|
| +
|
| + // Guard against overflowing |buffer_->data| when sends outpace receives
|
| + // (e.g. while the consumer is unresponsive).
|
| + if (buffer_->start + buffer_->length + length > kSharedBufferSize)
|
| + return 0;
|
| +
|
| + uint8* b = static_cast<uint8*>(const_cast<void*>(buffer));
|
| + for (size_t i = 0; i < length; i++, buffer_->length++)
|
| + buffer_->data[buffer_->start + buffer_->length] = b[i];
|
| +
|
| + // The callback may be null for the receiving-side socket.
|
| + if (!on_send_cb_.is_null())
|
| + on_send_cb_.Run();
|
| + return length;
|
| +}
|
| +
|
| +size_t MockSyncSocket::Receive(void* buffer, size_t length) {
|
| + uint8* b = static_cast<uint8*>(buffer);
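|
| + // Note: this assumes |buffer_->start| is 0 on entry, which holds because
|
| + // both indices are reset at the end of every Receive().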
|
| + for (size_t i = buffer_->start; i < buffer_->length; i++, buffer_->start++)
|
| + b[i] = buffer_->data[buffer_->start];
|
| +
|
| + // Since the buffer is consumed atomically, we can reset the indices here.
|
| + buffer_->start = buffer_->length = 0;
|
| + return length;
|
| +}
|
| +
|
| +////////////////////////////////////////////////////////////////////////////////
|
| +
|
| +class FakeSpeechRecognizer {
|
| + public:
|
| + FakeSpeechRecognizer() : is_responsive_(true) {}
|
| + ~FakeSpeechRecognizer() {}
|
| +
|
| + void Initialize(
|
| + const blink::WebMediaStreamTrack& track,
|
| + const media::AudioParameters& sink_params,
|
| + const SpeechRecognitionAudioSourceProvider::OnStoppedCB& on_stopped_cb);
|
| +
|
| + // TODO(burnik): Move from the recognizer to the test.
|
| + SpeechRecognitionAudioSourceProvider* SourceProvider() {
|
| + return audio_source_provider_.get();
|
| + }
|
| +
|
| + // Emulates a single iteration of a thread receiving on the socket.
|
| + // This would normally be done on a receiving thread's loop in the
|
| + // browser process.
|
| + void EmulateReceiveThreadLoopIteration() {
|
| + // When not responsive, do nothing, as if the process were busy.
|
| + if (!is_responsive_)
|
| + return;
|
| +
|
| + local_socket_->Receive(shared_buffer_index_, sizeof(*shared_buffer_index_));
|
| + // Notify the producer that the audio buffer has been consumed.
|
| + (*shared_buffer_index_)++;
|
| + }
|
| +
|
| + // Used to simulate unresponsive behavior of the consumer.
|
| + void SimulateResponsiveness(bool is_responsive) {
|
| + is_responsive_ = is_responsive;
|
| + }
|
| +
|
| + // Used to simulate a problem with sockets.
|
| + void SetFailureModeOnForeignSocket(bool in_failure_mode) {
|
| + foreign_socket_->SetFailureMode(in_failure_mode);
|
| + }
|
| +
|
| + uint32 buffer_index() { return *shared_buffer_index_; }
|
| + media::AudioBus* audio_bus() const { return audio_track_bus_.get(); }
|
| +
|
| + private:
|
| + bool is_responsive_;
|
| + // Shared memory for the audio and synchronization.
|
| + scoped_ptr<base::SharedMemory> shared_memory_;
|
| +
|
| + // Fake sockets shared buffer.
|
| + scoped_ptr<MockSyncSocket::SharedBuffer> shared_buffer_;
|
| + scoped_ptr<MockSyncSocket> local_socket_;
|
| + scoped_ptr<MockSyncSocket> foreign_socket_;
|
| +
|
| + // Audio bus wrapping the shared memory from the renderer.
|
| + scoped_ptr<media::AudioBus> audio_track_bus_;
|
| +
|
| + uint32* shared_buffer_index_;
|
| + // Producer. TODO(burnik): this should be outside the recognizer.
|
| + scoped_ptr<SpeechRecognitionAudioSourceProvider> audio_source_provider_;
|
| +};
|
| +
|
| +void FakeSpeechRecognizer::Initialize(
|
| + const blink::WebMediaStreamTrack& track,
|
| + const media::AudioParameters& sink_params,
|
| + const SpeechRecognitionAudioSourceProvider::OnStoppedCB& on_stopped_cb) {
|
| + // Shared memory is allocated, mapped and shared.
|
| + uint32 shared_memory_size = sizeof(media::AudioInputBufferParameters) +
|
| + media::AudioBus::CalculateMemorySize(sink_params);
|
| + shared_memory_.reset(new base::SharedMemory());
|
| + ASSERT_TRUE(shared_memory_->CreateAndMapAnonymous(shared_memory_size));
|
| +
|
| + base::SharedMemoryHandle foreign_memory_handle;
|
| + ASSERT_TRUE(shared_memory_->ShareToProcess(base::GetCurrentProcessHandle(),
|
| + &foreign_memory_handle));
|
| +
|
| + media::AudioInputBuffer* buffer =
|
| + static_cast<media::AudioInputBuffer*>(shared_memory_->memory());
|
| + audio_track_bus_ = media::AudioBus::WrapMemory(sink_params, buffer->audio);
|
| +
|
| + // Reference to the counter used to synchronize.
|
| + shared_buffer_index_ = &(buffer->params.size);
|
| + *shared_buffer_index_ = 0U;
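|
| + // Note: |buffer->params.size| is repurposed as the count of consumed
|
| + // buffers; the fake recognizer increments it on every receive.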
|
| +
|
| + // Create a shared buffer for the |MockSyncSocket|s.
|
| + shared_buffer_.reset(new MockSyncSocket::SharedBuffer());
|
| +
|
| + // Local socket will receive signals from the producer.
|
| + local_socket_.reset(new MockSyncSocket(shared_buffer_.get()));
|
| +
|
| + // We automatically trigger a Receive when data is sent over the socket.
|
| + foreign_socket_.reset(new MockSyncSocket(
|
| + shared_buffer_.get(),
|
| + base::Bind(&FakeSpeechRecognizer::EmulateReceiveThreadLoopIteration,
|
| + base::Unretained(this))));
|
| +
|
| + // This would normally pair the sockets; here it has no effect since the
|
| + // mocks communicate through |shared_buffer_| instead.
|
| + base::SyncSocket::CreatePair(local_socket_.get(), foreign_socket_.get());
|
| +
|
| + // Create the producer. TODO(burnik): move out of the recognizer.
|
| + audio_source_provider_.reset(new SpeechRecognitionAudioSourceProvider(
|
| + track, sink_params, foreign_memory_handle, foreign_socket_.get(),
|
| + on_stopped_cb));
|
| +}
|
| +
|
| +////////////////////////////////////////////////////////////////////////////////
|
| +
|
| +class SpeechRecognitionAudioSourceProviderTest : public testing::Test {
|
| + public:
|
| + SpeechRecognitionAudioSourceProviderTest() {
|
| + // Audio environment setup.
|
| + source_params_.Reset(kInputFormat, kInputChannelLayout, kInputChannels,
|
| + kInputSampleRate, kInputBitsPerSample,
|
| + kInputFramesPerBuffer);
|
| +
|
| + sink_params_.Reset(kOutputFormat, kOutputChannelLayout, kOutputChannels,
|
| + kOutputSampleRate, kOutputBitsPerSample,
|
| + kOutputFramesPerBuffer);
|
| +
|
| + source_data_.reset(new int16[kSourceDataLength]);
|
| +
|
| + // Prepare the track and audio source.
|
| + blink::WebMediaStreamTrack blink_track;
|
| + PrepareTrackWithMediaStreamType(MEDIA_DEVICE_AUDIO_CAPTURE, &blink_track);
|
| +
|
| + // Get the native track from the blink track and initialize.
|
| + native_track_ =
|
| + static_cast<WebRtcLocalAudioTrack*>(blink_track.extraData());
|
| + native_track_->OnSetFormat(source_params_);
|
| +
|
| + // Create and initialize the consumer.
|
| + recognizer_ = new FakeSpeechRecognizer();
|
| + recognizer_->Initialize(
|
| + blink_track, sink_params_,
|
| + base::Bind(&SpeechRecognitionAudioSourceProviderTest::StoppedCallback,
|
| + base::Unretained(this)));
|
| +
|
| + // Initialize the producer.
|
| + audio_source_provider_.reset(recognizer_->SourceProvider());
|
| + }
|
| +
|
| + // Mock callback for when the track is stopped.
|
| + MOCK_METHOD0(StoppedCallback, void());
|
| +
|
| + protected:
|
| + static void PrepareTrackWithMediaStreamType(
|
| + const MediaStreamType device_type,
|
| + blink::WebMediaStreamTrack* blink_track) {
|
| + // Device info.
|
| + StreamDeviceInfo device_info(device_type, "Mock audio device",
|
| + "mock_audio_device_id");
|
| +
|
| + // Constraints.
|
| + MockMediaConstraintFactory constraint_factory;
|
| + const blink::WebMediaConstraints constraints =
|
| + constraint_factory.CreateWebMediaConstraints();
|
| +
|
| + // Capturer.
|
| + scoped_refptr<WebRtcAudioCapturer> capturer(
|
| + WebRtcAudioCapturer::CreateCapturer(-1, device_info, constraints, NULL,
|
| + NULL));
|
| +
|
| + // Adapter.
|
| + scoped_refptr<WebRtcLocalAudioTrackAdapter> adapter(
|
| + WebRtcLocalAudioTrackAdapter::Create(std::string(), NULL));
|
| +
|
| + // Native track.
|
| + scoped_ptr<WebRtcLocalAudioTrack> native_track(
|
| + new WebRtcLocalAudioTrack(adapter.get(), capturer, NULL));
|
| +
|
| + // Blink audio source.
|
| + blink::WebMediaStreamSource blink_audio_source;
|
| + blink_audio_source.initialize(base::UTF8ToUTF16("dummy_source_id"),
|
| + blink::WebMediaStreamSource::TypeAudio,
|
| + base::UTF8ToUTF16("dummy_source_name"));
|
| + MediaStreamSource::SourceStoppedCallback cb;
|
| + blink_audio_source.setExtraData(
|
| + new MediaStreamAudioSource(-1, device_info, cb, NULL));
|
| +
|
| + // Blink track.
|
| + blink_track->initialize(blink::WebString::fromUTF8("audio_track"),
|
| + blink_audio_source);
|
| + blink_track->setExtraData(native_track.release());
|
| + }
|
| +
|
| + // Emulates an audio capture device capturing data from the source.
|
| + inline void CaptureAudio(const size_t buffers) {
|
| + for (size_t i = 0; i < buffers; ++i)
|
| + native_track_->Capture(source_data_.get(),
|
| + base::TimeDelta::FromMilliseconds(0), 1, false,
|
| + false);
|
| + }
|
| +
|
| + // Helper method to verify captured audio data has been consumed.
|
| + inline void AssertConsumedBuffers(const size_t buffer_index) {
|
| + ASSERT_EQ(buffer_index, recognizer_->buffer_index());
|
| + }
|
| +
|
| + // Helper method to push audio data to producer and verify it was consumed.
|
| + inline void CaptureAudioAndAssertConsumedBuffers(const size_t buffers,
|
| + const size_t buffer_index) {
|
| + CaptureAudio(buffers);
|
| + AssertConsumedBuffers(buffer_index);
|
| + }
|
| +
|
| + // Producer.
|
| + scoped_ptr<SpeechRecognitionAudioSourceProvider> audio_source_provider_;
|
| + // Consumer.
|
| + FakeSpeechRecognizer* recognizer_;
|
| + // Audio related members.
|
| + scoped_ptr<int16[]> source_data_;
|
| + media::AudioParameters source_params_;
|
| + media::AudioParameters sink_params_;
|
| + WebRtcLocalAudioTrack* native_track_;
|
| +};
|
| +
|
| +////////////////////////////////////////////////////////////////////////////////
|
| +
|
| +TEST_F(SpeechRecognitionAudioSourceProviderTest, CheckIsSupportedTrackType) {
|
| + typedef std::map<MediaStreamType, bool> SupportedTrackPolicy;
|
| + // This test must be aligned with the policy of supported tracks.
|
| + SupportedTrackPolicy p;
|
| + p[MEDIA_NO_SERVICE] = false;
|
| + p[MEDIA_DEVICE_AUDIO_CAPTURE] = true; // Only one supported for now.
|
| + p[MEDIA_DEVICE_VIDEO_CAPTURE] = false;
|
| + p[MEDIA_TAB_AUDIO_CAPTURE] = false;
|
| + p[MEDIA_TAB_VIDEO_CAPTURE] = false;
|
| + p[MEDIA_DESKTOP_VIDEO_CAPTURE] = false;
|
| + p[MEDIA_LOOPBACK_AUDIO_CAPTURE] = false;
|
| + p[MEDIA_DEVICE_AUDIO_OUTPUT] = false;
|
| + // Ensure this test gets updated along with |content::MediaStreamType| enum.
|
| + EXPECT_EQ(static_cast<size_t>(NUM_MEDIA_TYPES), p.size());
|
| + // Check the entire policy.
|
| + for (SupportedTrackPolicy::iterator it = p.begin(); it != p.end(); ++it) {
|
| + blink::WebMediaStreamTrack blink_track;
|
| + PrepareTrackWithMediaStreamType(it->first, &blink_track);
|
| + ASSERT_EQ(
|
| + it->second,
|
| + SpeechRecognitionAudioSourceProvider::IsSupportedTrack(blink_track));
|
| + }
|
| +}
|
| +
|
| +TEST_F(SpeechRecognitionAudioSourceProviderTest, RecognizerNotifiedOnSocket) {
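|
| + // Each batch of |kBuffersPerNotification| captured buffers is expected to
|
| + // yield exactly one notification on the socket, advancing the index by one.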
|
| + AssertConsumedBuffers(0U);
|
| + CaptureAudioAndAssertConsumedBuffers(kBuffersPerNotification, 1U);
|
| + CaptureAudioAndAssertConsumedBuffers(kBuffersPerNotification, 2U);
|
| + CaptureAudioAndAssertConsumedBuffers(kBuffersPerNotification, 3U);
|
| +}
|
| +
|
| +TEST_F(SpeechRecognitionAudioSourceProviderTest, AudioDataIsResampledOnSink) {
|
| + // Fill audio input frames with 0, 1, 2, 3, ..., 440.
|
| + for (size_t i = 0; i < kSourceDataLength; ++i)
|
| + source_data_[i] = i;
|
| +
|
| + const size_t num_frames_to_test = 12;
|
| + const size_t sink_data_length = kOutputFramesPerBuffer * kOutputChannels;
|
| + int16 sink_data[sink_data_length];
|
| + media::AudioBus* sink_bus = recognizer_->audio_bus();
|
| +
|
| + // Render the audio data from the recognizer.
|
| + sink_bus->ToInterleaved(sink_bus->frames(),
|
| + sink_params_.bits_per_sample() / 8, sink_data);
|
| +
|
| + // Test both channels are zeroed out before we trigger resampling.
|
| + for (size_t i = 0; i < num_frames_to_test; ++i) {
|
| + ASSERT_EQ(0, sink_data[i * 2]);
|
| + ASSERT_EQ(0, sink_data[i * 2 + 1]);
|
| + }
|
| +
|
| + // Trigger the source provider to resample the input data.
|
| + AssertConsumedBuffers(0U);
|
| + CaptureAudioAndAssertConsumedBuffers(kBuffersPerNotification, 1U);
|
| +
|
| + // Render the audio data from the recognizer.
|
| + sink_bus->ToInterleaved(sink_bus->frames(),
|
| + sink_params_.bits_per_sample() / 8, sink_data);
|
| +
|
| + // Expected frames of the resampled data, based on |source_data_|.
|
| + // Note: these values also depend on input/output audio params.
|
| + const int16 expected_data[num_frames_to_test] = {0, 2, 5, 8, 11, 13,
|
| + 16, 19, 22, 24, 27, 30};
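|
| + // For reference: with this input ramp the expected values roughly track
|
| + // i * 44100 / 16000 (a resampling ratio of ~2.756), modulo filtering.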
|
| +
|
| + // Test that both channels carry the same resampled data.
|
| + for (size_t i = 0; i < num_frames_to_test; ++i) {
|
| + ASSERT_EQ(expected_data[i], sink_data[i * 2]);
|
| + ASSERT_EQ(expected_data[i], sink_data[i * 2 + 1]);
|
| + }
|
| +}
|
| +
|
| +TEST_F(SpeechRecognitionAudioSourceProviderTest, SyncSocketFailsSendingData) {
|
| + // (1) Start out with no problems.
|
| + AssertConsumedBuffers(0U);
|
| + CaptureAudioAndAssertConsumedBuffers(kBuffersPerNotification, 1U);
|
| +
|
| + // (2) A failure occurs (the socket cannot send).
|
| + recognizer_->SetFailureModeOnForeignSocket(true);
|
| + CaptureAudioAndAssertConsumedBuffers(kBuffersPerNotification, 1U);
|
| +}
|
| +
|
| +TEST_F(SpeechRecognitionAudioSourceProviderTest, OnReadyStateChangedOccurred) {
|
| + AssertConsumedBuffers(0U);
|
| + CaptureAudioAndAssertConsumedBuffers(kBuffersPerNotification, 1U);
|
| + EXPECT_CALL(*this, StoppedCallback()).Times(1);
|
| +
|
| + native_track_->Stop();
|
| + CaptureAudioAndAssertConsumedBuffers(kBuffersPerNotification, 1U);
|
| +}
|
| +
|
| +} // namespace content
|