 Chromium Code Reviews
 Chromium Code Reviews Issue 499233003:
  Binding media stream audio track to speech recognition [renderer]  (Closed) 
  Base URL: https://chromium.googlesource.com/chromium/src.git@master
    
  
    Issue 499233003:
  Binding media stream audio track to speech recognition [renderer]  (Closed) 
  Base URL: https://chromium.googlesource.com/chromium/src.git@master| Index: content/renderer/media/speech_recognition_audio_source_provider_unittest.cc | 
| diff --git a/content/renderer/media/speech_recognition_audio_source_provider_unittest.cc b/content/renderer/media/speech_recognition_audio_source_provider_unittest.cc | 
| new file mode 100644 | 
| index 0000000000000000000000000000000000000000..b9a03ae691765d87d94f6746b8147b521d5b0be5 | 
| --- /dev/null | 
| +++ b/content/renderer/media/speech_recognition_audio_source_provider_unittest.cc | 
| @@ -0,0 +1,463 @@ | 
| +// Copyright 2014 The Chromium Authors. All rights reserved. | 
| +// Use of this source code is governed by a BSD-style license that can be | 
| +// found in the LICENSE file. | 
| + | 
#include "content/renderer/media/speech_recognition_audio_source_provider.h"

#include <algorithm>
#include <cmath>
#include <map>
#include <string>

#include "base/strings/utf_string_conversions.h"
#include "content/renderer/media/mock_media_constraint_factory.h"
#include "content/renderer/media/webrtc/webrtc_local_audio_track_adapter.h"
#include "content/renderer/media/webrtc_local_audio_track.h"
#include "media/audio/audio_parameters.h"
#include "media/base/audio_bus.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "third_party/WebKit/public/platform/WebMediaStreamTrack.h"
| + | 
| +namespace content { | 
| + | 
| +// Mocked out sockets used for Send/Receive. | 
| +// Data is written and read from a shared buffer used as a FIFO and there is | 
| +// no blocking. |OnSendCB| is used to trigger a |Receive| on the other socket. | 
// Mocked-out sockets used for Send/Receive in the tests below.
// Data is written and read from a shared buffer used as a FIFO and there is
// no blocking. |OnSendCB| is used to trigger a |Receive| on the other socket,
// emulating the browser-side receiving thread synchronously.
class MockSyncSocket : public base::SyncSocket {
 public:
  // This allows for 2 requests in queue between the |MockSyncSocket|s
  // (two 4-byte |uint32| buffer indices fit in 8 bytes).
  static const int kSharedBufferSize = 8;

  // Buffer to be shared between two |MockSyncSocket|s. Allocated on heap.
  // |start| is the read offset into |data|; |length| is the number of queued
  // bytes following |start|.
  struct SharedBuffer {
    SharedBuffer() : start(0), length(0) {}

    uint8 data[kSharedBufferSize];
    size_t start;
    size_t length;
  };

  // Callback used for pairing an A.Send() with B.Receive() without blocking.
  typedef base::Callback<void()> OnSendCB;

  // Constructs the receiving end; no send callback is attached.
  // |shared_buffer| is not owned and must outlive this socket.
  explicit MockSyncSocket(SharedBuffer* shared_buffer)
      : buffer_(shared_buffer),
        in_failure_mode_(false) { }

  // Constructs the sending end; |on_send_cb| is run after every |Send| to
  // emulate the peer's receive-loop iteration.
  MockSyncSocket(SharedBuffer* shared_buffer, const OnSendCB& on_send_cb)
      : buffer_(shared_buffer),
        on_send_cb_(on_send_cb),
        in_failure_mode_(false) { }

  virtual size_t Send(const void* buffer, size_t length) OVERRIDE;
  virtual size_t Receive(void* buffer, size_t length) OVERRIDE;

  // When |in_failure_mode_| == true, the socket fails to send.
  void SetFailureMode(bool in_failure_mode) {
    in_failure_mode_ = in_failure_mode;
  }

 private:
  SharedBuffer* buffer_;  // Not owned.
  const OnSendCB on_send_cb_;
  bool in_failure_mode_;
};
| + | 
| +size_t MockSyncSocket::Send(const void* buffer, size_t length) { | 
| + if (in_failure_mode_) | 
| + return 0; | 
| + | 
| + uint8* b = static_cast<uint8*>(const_cast<void*>(buffer)); | 
| + for (size_t i = 0; i < length; i++, buffer_->length++) | 
| 
burnik
2014/09/23 12:39:21
Changed to prefixed increment.
 | 
| + buffer_->data[buffer_->start + buffer_->length] = b[i]; | 
| + | 
| + on_send_cb_.Run(); | 
| + return length; | 
| +} | 
| + | 
| +size_t MockSyncSocket::Receive(void* buffer, size_t length) { | 
| + uint8* b = static_cast<uint8*>(const_cast<void*>(buffer)); | 
| + for (size_t i = buffer_->start; i < buffer_->length; i++, buffer_->start++) | 
| 
burnik
2014/09/23 12:39:20
Changed to prefixed increment.
 | 
| + b[i] = buffer_->data[buffer_->start]; | 
| + | 
| + // Since buffer is used sequentially, we can reset the buffer indices here. | 
| + buffer_->start = buffer_->length = 0; | 
| + return length; | 
| +} | 
| + | 
| +//////////////////////////////////////////////////////////////////////////////// | 
| 
henrika (OOO until Aug 14)
2014/09/22 08:02:19
Please remove these non-standard separators.
 
burnik
2014/09/22 09:17:36
Done.
 
no longer working on chromium
2014/09/23 10:09:13
Not done yet.
 
burnik
2014/09/23 12:39:21
Yes, done for next patchset as advertised.
 | 
| + | 
// Fake consumer: stands in for the browser-side speech recognizer. It owns
// the shared memory the producer writes resampled audio into, and a pair of
// mock sockets used to synchronize buffer hand-off with the producer.
class FakeSpeechRecognizer {
 public:
  FakeSpeechRecognizer() : is_responsive_(true) { }

  // Allocates and maps the shared memory for one sink-sized audio buffer plus
  // its |AudioInputBufferParameters| header, duplicates the handle into
  // |foreign_memory_handle| for the producer, and wires up the socket pair.
  void Initialize(
      const blink::WebMediaStreamTrack& track,
      const media::AudioParameters& sink_params,
      base::SharedMemoryHandle* foreign_memory_handle) {
    // Shared memory is allocated, mapped and shared.
    uint32 shared_memory_size =
        sizeof(media::AudioInputBufferParameters) +
        media::AudioBus::CalculateMemorySize(sink_params);
    shared_memory_.reset(new base::SharedMemory());
    ASSERT_TRUE(shared_memory_->CreateAndMapAnonymous(shared_memory_size));
    ASSERT_TRUE(shared_memory_->ShareToProcess(base::GetCurrentProcessHandle(),
                                               foreign_memory_handle));

    // Wrap the shared memory for the audio bus.
    media::AudioInputBuffer* buffer =
        static_cast<media::AudioInputBuffer*>(shared_memory_->memory());
    audio_track_bus_ = media::AudioBus::WrapMemory(sink_params, buffer->audio);

    // |buffer->params.size| is repurposed as the synchronization counter
    // shared with the producer; |buffer_index_| aliases it.
    buffer_index_ = &(buffer->params.size);
    *buffer_index_ = 0U;

    // Create a shared buffer for the |MockSyncSocket|s.
    shared_buffer_.reset(new MockSyncSocket::SharedBuffer());

    // Local socket will receive signals from the producer.
    local_socket_.reset(new MockSyncSocket(shared_buffer_.get()));

    // The foreign socket automatically triggers a Receive on this object
    // whenever the producer Sends, emulating the receiving thread.
    foreign_socket_ = new MockSyncSocket(
        shared_buffer_.get(),
        base::Bind(&FakeSpeechRecognizer::EmulateReceiveThreadLoopIteration,
                   base::Unretained(this)));

    // This is usually done to pair the sockets. Here it's not effective
    // because the mocks share |shared_buffer_| directly.
    base::SyncSocket::CreatePair(local_socket_.get(), foreign_socket_);
  }

  // Emulates a single iteration of a thread receiving on the socket.
  // This would normally be done on a receiving thread's task on the browser.
  void EmulateReceiveThreadLoopIteration() {
    // When not responsive do nothing, as if the consumer process is busy.
    if (!is_responsive_)
      return;

    local_socket_->Receive(buffer_index_, sizeof(*buffer_index_));
    // Notify the producer that the audio buffer has been consumed.
    (*buffer_index_)++;
  }

  // Used to simulate an unresponsive behaviour of the consumer.
  void SimulateResponsiveness(bool is_responsive) {
    is_responsive_ = is_responsive;
  }

  MockSyncSocket* foreign_socket() { return foreign_socket_; }
  media::AudioBus* audio_bus() const { return audio_track_bus_.get(); }
  uint32 buffer_index() { return *buffer_index_; }

 private:
  bool is_responsive_;

  // Shared memory for the audio and synchronization.
  scoped_ptr<base::SharedMemory> shared_memory_;

  // Fake sockets shared buffer.
  scoped_ptr<MockSyncSocket::SharedBuffer> shared_buffer_;
  scoped_ptr<MockSyncSocket> local_socket_;
  // NOTE(review): raw pointer that is never deleted in this class —
  // presumably ownership is handed to the audio source provider via
  // |foreign_socket()|; confirm, and consider scoped_ptr with a release().
  MockSyncSocket* foreign_socket_;

  // Audio bus wrapping the shared memory written by the renderer.
  scoped_ptr<media::AudioBus> audio_track_bus_;

  // Points into shared memory (|AudioInputBufferParameters::size|); used for
  // synchronization of sent/received buffers. Not owned.
  uint32* buffer_index_;
};
| + | 
| +//////////////////////////////////////////////////////////////////////////////// | 
| 
henrika (OOO until Aug 14)
2014/09/22 08:02:19
remove
 
burnik
2014/09/22 09:17:36
Done.
 | 
| + | 
namespace {

// Audio parameters accepted by the speech recognition engine (sink side).
const int kSpeechRecognitionSampleRate = 16000;
const int kSpeechRecognitionFramesPerBuffer = 1600;

// Input (capture-side) audio format used by the tests.
const media::AudioParameters::Format kInputFormat =
    media::AudioParameters::AUDIO_PCM_LOW_LATENCY;
const media::ChannelLayout kInputChannelLayout = media::CHANNEL_LAYOUT_MONO;
const int kInputChannels = 1;
const int kInputBitsPerSample = 16;

// Output (sink-side) audio format used by the tests.
const media::AudioParameters::Format kOutputFormat =
    media::AudioParameters::AUDIO_PCM_LOW_LATENCY;
const media::ChannelLayout kOutputChannelLayout = media::CHANNEL_LAYOUT_STEREO;
const int kOutputChannels = 2;
const int kOutputBitsPerSample = 16;

}  // namespace
| + | 
| +//////////////////////////////////////////////////////////////////////////////// | 
| + | 
// Test fixture wiring a producer (|SpeechRecognitionAudioSourceProvider|)
// fed by a fake capture track to a consumer (|FakeSpeechRecognizer|).
class SpeechRecognitionAudioSourceProviderTest : public testing::Test {
 public:
  SpeechRecognitionAudioSourceProviderTest() { }

  // Initializes the producer and consumer with the specified audio
  // parameters. Returns the minimal number of input audio buffers which need
  // to be captured before they get sent to the consumer.
  uint32 Initialize(int input_sample_rate,
                    int input_frames_per_buffer,
                    int output_sample_rate,
                    int output_frames_per_buffer) {
    // Source/sink audio environment setup.
    source_params_.Reset(kInputFormat,
                         kInputChannelLayout,
                         kInputChannels,
                         input_sample_rate,
                         kInputBitsPerSample,
                         input_frames_per_buffer);
    sink_params_.Reset(kOutputFormat,
                       kOutputChannelLayout,
                       kOutputChannels,
                       output_sample_rate,
                       kOutputBitsPerSample,
                       output_frames_per_buffer);
    source_data_.reset(new int16[input_frames_per_buffer * kInputChannels]);

    // Prepare the track and audio source.
    blink::WebMediaStreamTrack blink_track;
    PrepareBlinkTrackOfType(MEDIA_DEVICE_AUDIO_CAPTURE, &blink_track);

    // Get the native track from the blink track and set its capture format.
    native_track_ =
        static_cast<WebRtcLocalAudioTrack*>(blink_track.extraData());
    native_track_->OnSetFormat(source_params_);

    // Create and initialize the consumer; it allocates the shared memory.
    recognizer_.reset(new FakeSpeechRecognizer());
    base::SharedMemoryHandle foreign_memory_handle;
    recognizer_->Initialize(blink_track, sink_params_, &foreign_memory_handle);

    // Create the producer, handing it the consumer's memory and socket.
    audio_source_provider_.reset(new SpeechRecognitionAudioSourceProvider(
        blink_track, sink_params_, foreign_memory_handle,
        recognizer_->foreign_socket(),
        base::Bind(&SpeechRecognitionAudioSourceProviderTest::StoppedCallback,
                   base::Unretained(this))));

    // Number of input buffers needed to fill one output buffer, i.e. to
    // trigger resampling and consumption:
    // ceil(output_frames * input_rate / (input_frames * output_rate)).
    return static_cast<uint32>(std::ceil(
        static_cast<double>(output_frames_per_buffer * input_sample_rate) /
        (input_frames_per_buffer * output_sample_rate)));
  }

  // Mock callback expected to be called when the track is stopped.
  MOCK_METHOD0(StoppedCallback, void());

 protected:
  // Builds a blink track of |device_type| backed by a native
  // |WebRtcLocalAudioTrack| (stored as the track's extra data).
  static void PrepareBlinkTrackOfType(
      const MediaStreamType device_type,
      blink::WebMediaStreamTrack* blink_track) {
    StreamDeviceInfo device_info(device_type, "Mock audio device",
                                 "mock_audio_device_id");

    MockMediaConstraintFactory constraint_factory;
    const blink::WebMediaConstraints constraints =
        constraint_factory.CreateWebMediaConstraints();

    scoped_refptr<WebRtcAudioCapturer> capturer(
        WebRtcAudioCapturer::CreateCapturer(-1, device_info, constraints, NULL,
                                            NULL));

    scoped_refptr<WebRtcLocalAudioTrackAdapter> adapter(
        WebRtcLocalAudioTrackAdapter::Create(std::string(), NULL));

    scoped_ptr<WebRtcLocalAudioTrack> native_track(
        new WebRtcLocalAudioTrack(adapter.get(), capturer, NULL));

    blink::WebMediaStreamSource blink_audio_source;
    blink_audio_source.initialize(base::UTF8ToUTF16("dummy_source_id"),
                                  blink::WebMediaStreamSource::TypeAudio,
                                  base::UTF8ToUTF16("dummy_source_name"));
    MediaStreamSource::SourceStoppedCallback cb;
    blink_audio_source.setExtraData(
        new MediaStreamAudioSource(-1, device_info, cb, NULL));

    // The blink track takes ownership of the native track via extraData.
    blink_track->initialize(blink::WebString::fromUTF8("dummy_audio_track"),
                            blink_audio_source);
    blink_track->setExtraData(native_track.release());
  }

  // Emulates an audio capture device delivering |buffers| buffers of
  // |source_data_| to the native track (zero delay, single channel, no
  // key-press, no fifo-needed flags).
  inline void CaptureAudio(const uint32 buffers) {
    for (uint32 i = 0; i < buffers; ++i)
      native_track_->Capture(source_data_.get(),
                             base::TimeDelta::FromMilliseconds(0), 1, false,
                             false);
  }

  // Used to simulate a problem with sockets.
  void SetFailureModeOnForeignSocket(bool in_failure_mode) {
    recognizer_->foreign_socket()->SetFailureMode(in_failure_mode);
  }

  // Helper method for verifying captured audio data has been consumed.
  inline void AssertConsumedBuffers(const uint32 buffer_index) {
    ASSERT_EQ(buffer_index, recognizer_->buffer_index());
  }

  // Helper method for providing audio data to producer and verifying it was
  // consumed on the recognizer.
  inline void CaptureAudioAndAssertConsumedBuffers(const uint32 buffers,
                                                   const uint32 buffer_index) {
    CaptureAudio(buffers);
    AssertConsumedBuffers(buffer_index);
  }

  // Helper method to capture and assert consumption at different sample rates
  // and audio buffer sizes; |consumptions| full sink buffers are exercised.
  inline void AssertConsumptionForAudioParameters(
      const int input_sample_rate,
      const int input_frames_per_buffer,
      const int output_sample_rate,
      const int output_frames_per_buffer,
      const uint32 consumptions) {
    const uint32 kBuffersPerNotification =
        Initialize(input_sample_rate, input_frames_per_buffer,
                   output_sample_rate, output_frames_per_buffer);
    AssertConsumedBuffers(0U);

    for (uint32 i = 1U; i <= consumptions; ++i) {
      CaptureAudio(kBuffersPerNotification);
      ASSERT_EQ(i, recognizer_->buffer_index())
          << "Tested at rates: "
          << "In(" << input_sample_rate << ", " << input_frames_per_buffer
          << ") "
          << "Out(" << output_sample_rate << ", " << output_frames_per_buffer
          << ")";
    }
  }

  // Producer.
  scoped_ptr<SpeechRecognitionAudioSourceProvider> audio_source_provider_;

  // Consumer.
  scoped_ptr<FakeSpeechRecognizer> recognizer_;

  // Audio related members.
  scoped_ptr<int16[]> source_data_;
  media::AudioParameters source_params_;
  media::AudioParameters sink_params_;
  // Owned by the blink track; valid after Initialize(). Not owned here.
  WebRtcLocalAudioTrack* native_track_;
};
| + | 
| +//////////////////////////////////////////////////////////////////////////////// | 
| + | 
| +TEST_F(SpeechRecognitionAudioSourceProviderTest, CheckIsSupportedAudioTrack) { | 
| 
henrika (OOO until Aug 14)
2014/09/22 08:02:18
Could you make the name more clear? CheckIsSupport
 
burnik
2014/09/22 09:17:37
Added comment above test.
 | 
| + typedef std::map<MediaStreamType, bool> SupportedTrackPolicy; | 
| + | 
| + // This test must be aligned with the policy of supported tracks. | 
| + SupportedTrackPolicy p; | 
| + p[MEDIA_NO_SERVICE] = false; | 
| + p[MEDIA_DEVICE_AUDIO_CAPTURE] = true; // The only one supported for now. | 
| + p[MEDIA_DEVICE_VIDEO_CAPTURE] = false; | 
| + p[MEDIA_TAB_AUDIO_CAPTURE] = false; | 
| + p[MEDIA_TAB_VIDEO_CAPTURE] = false; | 
| + p[MEDIA_DESKTOP_VIDEO_CAPTURE] = false; | 
| + p[MEDIA_LOOPBACK_AUDIO_CAPTURE] = false; | 
| + p[MEDIA_DEVICE_AUDIO_OUTPUT] = false; | 
| + | 
| + // Ensure this test gets updated along with |content::MediaStreamType| enum. | 
| + EXPECT_EQ(NUM_MEDIA_TYPES, p.size()); | 
| + | 
| + // Check the the entire policy. | 
| + for (SupportedTrackPolicy::iterator it = p.begin(); it != p.end(); ++it) { | 
| + blink::WebMediaStreamTrack blink_track; | 
| + PrepareBlinkTrackOfType(it->first, &blink_track); | 
| + ASSERT_EQ( | 
| + it->second, | 
| + SpeechRecognitionAudioSourceProvider::IsSupportedTrack(blink_track)); | 
| + } | 
| +} | 
| + | 
| +TEST_F(SpeechRecognitionAudioSourceProviderTest, RecognizerNotifiedOnSocket) { | 
| 
henrika (OOO until Aug 14)
2014/09/22 08:02:18
Please add some lines of comments above each test
 
burnik
2014/09/22 09:17:37
Done.
 | 
| + const size_t kNumAudioParamTuples = 22; | 
| + const int kAudioParams[kNumAudioParamTuples][2] = { | 
| + {8000, 80}, {8000, 800}, {16000, 160}, {16000, 1600}, | 
| + {32000, 320}, {32000, 3200}, {44100, 441}, {44100, 4410}, | 
| + {48000, 480}, {48000, 4800}, {96000, 960}, {96000, 9600}, | 
| + {11025, 111}, {11025, 1103}, {22050, 221}, {22050, 2205}, | 
| + {88200, 882}, {88200, 8820}, {176400, 1764}, {176400, 17640}, | 
| + {192000, 1920}, {192000, 19200}}; | 
| + | 
| + // Check all listed tuples of input sample rates and buffers sizes. | 
| + for (size_t i = 0; i < kNumAudioParamTuples; ++i) { | 
| + AssertConsumptionForAudioParameters( | 
| + kAudioParams[i][0], kAudioParams[i][1], | 
| + kSpeechRecognitionSampleRate, kSpeechRecognitionFramesPerBuffer, 3U); | 
| + } | 
| +} | 
| + | 
// Checks that captured audio actually reaches the consumer's shared-memory
// audio bus resampled (44.1kHz mono -> 16kHz stereo) and duplicated to both
// output channels. The expected frame values are hard-coded against the
// resampler's output for the 0..440 ramp input.
TEST_F(SpeechRecognitionAudioSourceProviderTest, AudioDataIsResampledOnSink) {
  const uint32 kBuffersPerNotification = Initialize(44100, 441, 16000, 1600);

  // Fill audio input frames with 0, 1, 2, 3, ..., 440.
  const uint32 source_data_length = 441 * kInputChannels;
  for (uint32 i = 0; i < source_data_length; ++i)
    source_data_[i] = i;

  const uint32 num_frames_to_test = 12;
  const uint32 sink_data_length = 1600 * kOutputChannels;
  int16 sink_data[sink_data_length];
  media::AudioBus* sink_bus = recognizer_->audio_bus();

  // Render the audio data from the recognizer.
  sink_bus->ToInterleaved(sink_bus->frames(),
                          sink_params_.bits_per_sample() / 8, sink_data);

  // Both channels must be zeroed out before resampling is triggered.
  for (uint32 i = 0; i < num_frames_to_test; ++i) {
    ASSERT_EQ(0, sink_data[i * 2]);
    ASSERT_EQ(0, sink_data[i * 2 + 1]);
  }

  // Trigger the source provider to resample the input data.
  AssertConsumedBuffers(0U);
  CaptureAudioAndAssertConsumedBuffers(kBuffersPerNotification, 1U);

  // Render the audio data from the recognizer again, now with content.
  sink_bus->ToInterleaved(sink_bus->frames(),
                          sink_params_.bits_per_sample() / 8, sink_data);

  // Expected resampled frames - derived from the |source_data_| ramp.
  const int16 expected_data[num_frames_to_test] = {0,  2,  5,  8,  11, 13,
                                                   16, 19, 22, 24, 27, 30};

  // Both channels must carry the same (mono-upmixed) resampled data.
  for (uint32 i = 0; i < num_frames_to_test; ++i) {
    ASSERT_EQ(expected_data[i], sink_data[i * 2]);
    ASSERT_EQ(expected_data[i], sink_data[i * 2 + 1]);
  }
}
| + | 
// Checks that a socket send failure does not advance the consumer's buffer
// index, i.e. no notification is delivered when the socket cannot send.
TEST_F(SpeechRecognitionAudioSourceProviderTest, SyncSocketFailsSendingData) {
  const uint32 kBuffersPerNotification = Initialize(44100, 441, 16000, 1600);
  // Start with no problems on the socket: one buffer gets consumed.
  AssertConsumedBuffers(0U);
  CaptureAudioAndAssertConsumedBuffers(kBuffersPerNotification, 1U);

  // A failure occurs (socket cannot send): the index must stay at 1.
  SetFailureModeOnForeignSocket(true);
  CaptureAudioAndAssertConsumedBuffers(kBuffersPerNotification, 1U);
}
| + | 
// Checks that stopping the native track invokes the stopped callback exactly
// once and that no further audio is consumed afterwards.
TEST_F(SpeechRecognitionAudioSourceProviderTest, OnReadyStateChangedOccured) {
  const uint32 kBuffersPerNotification = Initialize(44100, 441, 16000, 1600);
  AssertConsumedBuffers(0U);
  CaptureAudioAndAssertConsumedBuffers(kBuffersPerNotification, 1U);
  EXPECT_CALL(*this, StoppedCallback()).Times(1);

  native_track_->Stop();
  // Capturing after Stop() must not advance the consumer's buffer index.
  CaptureAudioAndAssertConsumedBuffers(kBuffersPerNotification, 1U);
}
| + | 
| +} // namespace content |