Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2014 The Chromium Authors. All rights reserved. | |
| 2 // Use of this source code is governed by a BSD-style license that can be | |
| 3 // found in the LICENSE file. | |
| 4 | |
| 5 #include "content/renderer/media/speech_recognition_audio_source_provider.h" | |
| 6 | |
| 7 #include "base/strings/utf_string_conversions.h" | |
| 8 #include "content/renderer/media/mock_media_constraint_factory.h" | |
| 9 #include "content/renderer/media/webrtc/webrtc_local_audio_track_adapter.h" | |
| 10 #include "content/renderer/media/webrtc_local_audio_track.h" | |
| 11 #include "media/audio/audio_parameters.h" | |
| 12 #include "media/base/audio_bus.h" | |
| 13 #include "testing/gmock/include/gmock/gmock.h" | |
| 14 #include "testing/gtest/include/gtest/gtest.h" | |
| 15 #include "third_party/WebKit/public/platform/WebMediaStreamTrack.h" | |
| 16 | |
| 17 namespace content { | |
| 18 | |
| 19 // Mocked out sockets used for Send/Receive. | |
| 20 // Data is written and read from a shared buffer used as a FIFO and there is | |
| 21 // no blocking. |OnSendCB| is used to trigger a |Receive| on the other socket. | |
| 22 class MockSyncSocket : public base::SyncSocket { | |
| 23 public: | |
| 24 // This allows for 2 requests in queue between the |MockSyncSocket|s. | |
| 25 static const int kSharedBufferSize = 8; | |
| 26 | |
| 27 // Buffer to be shared between two |MockSyncSocket|s. Allocated on the heap. | |
| 28 struct SharedBuffer { | |
| 29 SharedBuffer() : start(0), length(0) {} | |
|
tommi (sloooow) - chröme
2014/09/24 09:52:00
nit: what about also initializing data?
SharedBuf
burnik
2014/09/24 11:54:22
Done.
| |
| 30 | |
| 31 uint8 data[kSharedBufferSize]; | |
| 32 size_t start; | |
| 33 size_t length; | |
| 34 }; | |
| 35 | |
| 36 // Callback used for pairing an A.Send() with B.Receive() without blocking. | |
| 37 typedef base::Callback<void()> OnSendCB; | |
| 38 | |
| 39 explicit MockSyncSocket(SharedBuffer* shared_buffer) | |
| 40 : buffer_(shared_buffer), | |
| 41 in_failure_mode_(false) { } | |
|
tommi (sloooow) - chröme
2014/09/24 09:51:59
nit: {}
burnik
2014/09/24 11:54:22
Done.
| |
| 42 | |
| 43 MockSyncSocket(SharedBuffer* shared_buffer, const OnSendCB& on_send_cb) | |
| 44 : buffer_(shared_buffer), | |
| 45 on_send_cb_(on_send_cb), | |
| 46 in_failure_mode_(false) { } | |
| 47 | |
| 48 virtual size_t Send(const void* buffer, size_t length) OVERRIDE; | |
| 49 virtual size_t Receive(void* buffer, size_t length) OVERRIDE; | |
| 50 | |
| 51 // When |in_failure_mode_| == true, the socket fails to send. | |
| 52 void SetFailureMode(bool in_failure_mode) { | |
| 53 in_failure_mode_ = in_failure_mode; | |
| 54 } | |
| 55 | |
| 56 private: | |
| 57 SharedBuffer* buffer_; | |
| 58 const OnSendCB on_send_cb_; | |
| 59 bool in_failure_mode_; | |
| 60 }; | |
| 61 | |
| 62 size_t MockSyncSocket::Send(const void* buffer, size_t length) { | |
| 63 if (in_failure_mode_) | |
| 64 return 0; | |
| 65 | |
| 66 uint8* b = static_cast<uint8*>(const_cast<void*>(buffer)); | |
|
tommi (sloooow) - chröme
2014/09/24 09:51:59
is this safe (if it is, please add a comment)? Wh
burnik
2014/09/24 11:54:22
Would this be safe?
const uint8* b = static_cast<
| |
| 67 for (size_t i = 0; i < length; ++i, ++buffer_->length) | |
| 68 buffer_->data[buffer_->start + buffer_->length] = b[i]; | |
|
tommi (sloooow) - chröme
2014/09/24 09:51:59
hmm... I don't see why you need to cast away the constness
burnik
2014/09/24 11:54:22
Acknowledged.
| |
| 69 | |
| 70 on_send_cb_.Run(); | |
| 71 return length; | |
| 72 } | |
| 73 | |
| 74 size_t MockSyncSocket::Receive(void* buffer, size_t length) { | |
| 75 uint8* b = static_cast<uint8*>(const_cast<void*>(buffer)); | |
|
tommi (sloooow) - chröme
2014/09/24 09:51:59
buffer isn't const, so no need for the const_cast
burnik
2014/09/24 11:54:22
Done.
| |
| 76 for (size_t i = buffer_->start; i < buffer_->length; ++i, ++buffer_->start) | |
| 77 b[i] = buffer_->data[buffer_->start]; | |
| 78 | |
| 79 // Since the buffer is used sequentially, we can reset the buffer indices here. | |
| 80 buffer_->start = buffer_->length = 0; | |
| 81 return length; | |
| 82 } | |
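For context, the mechanism described in the class comment above reduces to a few lines of standalone C++. In the sketch below, std::function stands in for base::Callback, and FifoBuffer / FakeSocket are illustrative names only (not part of this CL): Send() appends to the shared FIFO and synchronously fires the peer's receive callback, so neither side ever blocks.

```cpp
#include <cstdint>
#include <cstring>
#include <functional>
#include <iostream>

// Illustrative stand-ins for MockSyncSocket::SharedBuffer and the mock socket.
struct FifoBuffer {
  uint8_t data[8] = {};
  size_t start = 0;
  size_t length = 0;
};

class FakeSocket {
 public:
  FakeSocket(FifoBuffer* buffer, std::function<void()> on_send = {})
      : buffer_(buffer), on_send_(std::move(on_send)) {}

  // Append |length| bytes to the FIFO, then notify the peer (no blocking).
  size_t Send(const void* bytes, size_t length) {
    std::memcpy(buffer_->data + buffer_->start + buffer_->length, bytes,
                length);
    buffer_->length += length;
    if (on_send_)
      on_send_();  // Triggers the peer's Receive() synchronously.
    return length;
  }

  // Drain the FIFO into |bytes| and reset the indices, as the mock does.
  size_t Receive(void* bytes, size_t length) {
    std::memcpy(bytes, buffer_->data + buffer_->start, length);
    buffer_->start = buffer_->length = 0;
    return length;
  }

 private:
  FifoBuffer* buffer_;
  std::function<void()> on_send_;
};

int main() {
  FifoBuffer fifo;
  uint32_t consumed = 0;
  FakeSocket local(&fifo);             // Consumer side.
  FakeSocket foreign(&fifo, [&]() {    // Producer side notifies the consumer.
    local.Receive(&consumed, sizeof(consumed));
  });
  uint32_t index = 42;
  foreign.Send(&index, sizeof(index));  // Producer signals one buffer.
  std::cout << consumed << "\n";        // Prints 42.
  return 0;
}
```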
| 83 | |
| 84 // This fake class is the consumer used to verify behaviour of the producer. | |
| 85 // The |Initialize()| method shows what the consumer should be responsible for | |
| 86 // in the production code (minus the mocks). | |
| 87 class FakeSpeechRecognizer { | |
| 88 public: | |
| 89 FakeSpeechRecognizer() : is_responsive_(true) { } | |
| 90 | |
| 91 void Initialize( | |
| 92 const blink::WebMediaStreamTrack& track, | |
| 93 const media::AudioParameters& sink_params, | |
| 94 base::SharedMemoryHandle* foreign_memory_handle) { | |
| 95 // Shared memory is allocated, mapped and shared. | |
| 96 uint32 shared_memory_size = | |
| 97 sizeof(media::AudioInputBufferParameters) + | |
| 98 media::AudioBus::CalculateMemorySize(sink_params); | |
| 99 shared_memory_.reset(new base::SharedMemory()); | |
| 100 ASSERT_TRUE(shared_memory_->CreateAndMapAnonymous(shared_memory_size)); | |
| 101 ASSERT_TRUE(shared_memory_->ShareToProcess(base::GetCurrentProcessHandle(), | |
| 102 foreign_memory_handle)); | |
| 103 | |
| 104 // Wrap the shared memory for the audio bus. | |
| 105 media::AudioInputBuffer* buffer = | |
| 106 static_cast<media::AudioInputBuffer*>(shared_memory_->memory()); | |
| 107 audio_track_bus_ = media::AudioBus::WrapMemory(sink_params, buffer->audio); | |
| 108 | |
| 109 // Reference to the counter used to synchronize. | |
| 110 buffer_index_ = &(buffer->params.size); | |
|
tommi (sloooow) - chröme
2014/09/24 09:51:59
what about just having a member variable of type m
burnik
2014/09/24 11:54:22
I think AudioInputBuffer is a terrible name for th
| |
| 111 *buffer_index_ = 0U; | |
| 112 | |
| 113 // Create a shared buffer for the |MockSyncSocket|s. | |
| 114 shared_buffer_.reset(new MockSyncSocket::SharedBuffer()); | |
| 115 | |
| 116 // Local socket will receive signals from the producer. | |
| 117 local_socket_.reset(new MockSyncSocket(shared_buffer_.get())); | |
| 118 | |
| 119 // We automatically trigger a Receive when data is sent over the socket. | |
| 120 foreign_socket_ = new MockSyncSocket( | |
| 121 shared_buffer_.get(), | |
| 122 base::Bind(&FakeSpeechRecognizer::EmulateReceiveThreadLoopIteration, | |
| 123 base::Unretained(this))); | |
| 124 | |
| 125 // This is normally done to pair the sockets; with the mocks it has no effect. | |
| 126 base::SyncSocket::CreatePair(local_socket_.get(), foreign_socket_); | |
| 127 } | |
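To make the pointer aliasing in Initialize() easier to follow: one shared-memory region holds the AudioInputBufferParameters header immediately followed by the audio area that AudioBus::WrapMemory() wraps, and the test reuses the header's |size| field as its consumed-buffer counter. Below is a simplified, self-contained sketch of that pattern; FakeParams and FakeInputBuffer are made-up stand-ins (the real media:: structs carry more fields), so treat it as an illustration rather than the actual layout.

```cpp
#include <cstdint>
#include <cstdio>
#include <vector>

// Simplified stand-ins for media::AudioInputBufferParameters and
// media::AudioInputBuffer: a small header followed by the audio area.
struct FakeParams {
  uint32_t size;  // The test reuses this field as the consumed-buffer counter.
};
struct FakeInputBuffer {
  FakeParams params;
  int8_t audio[1];  // Start of the audio area wrapped by the AudioBus.
};

int main() {
  // One allocation holds the header plus the audio samples, mirroring
  // shared_memory_size = sizeof(params) + AudioBus::CalculateMemorySize(...).
  std::vector<uint8_t> region(sizeof(FakeParams) + 16 * sizeof(int16_t));
  FakeInputBuffer* buffer = reinterpret_cast<FakeInputBuffer*>(region.data());

  uint32_t* buffer_index = &buffer->params.size;  // Same aliasing as the test.
  *buffer_index = 0;
  ++(*buffer_index);  // The consumer acknowledges one consumed buffer.
  std::printf("%u\n", *buffer_index);  // Prints 1.
  return 0;
}
```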
| 128 | |
| 129 // Emulates a single iteration of a thread receiving on the socket. | |
| 130 // This would normally be done by a receiving thread's task in the browser. | |
| 131 void EmulateReceiveThreadLoopIteration() { | |
| 132 // When not responsive, do nothing, as if the process were busy. | |
| 133 if (!is_responsive_) | |
| 134 return; | |
| 135 | |
| 136 local_socket_->Receive(buffer_index_, sizeof(*buffer_index_)); | |
| 137 // Notify the producer that the audio buffer has been consumed. | |
| 138 ++(*buffer_index_); | |
| 139 } | |
| 140 | |
| 141 // Used to simulate unresponsive behaviour of the consumer. | |
| 142 void SimulateResponsiveness(bool is_responsive) { | |
| 143 is_responsive_ = is_responsive; | |
| 144 } | |
| 145 | |
| 146 MockSyncSocket* foreign_socket() { return foreign_socket_; } | |
| 147 media::AudioBus* audio_bus() const { return audio_track_bus_.get(); } | |
| 148 uint32 buffer_index() { return *buffer_index_; } | |
|
tommi (sloooow) - chröme
2014/09/24 09:51:59
isn't this returning 'size' rather than a buffer index?
burnik
2014/09/24 11:54:22
Again. I'm actually using 'size' to count the buff
| |
| 149 | |
| 150 private: | |
| 151 bool is_responsive_; | |
| 152 | |
| 153 // Shared memory for the audio and synchronization. | |
| 154 scoped_ptr<base::SharedMemory> shared_memory_; | |
| 155 | |
| 156 // Fake sockets and their shared buffer. | |
| 157 scoped_ptr<MockSyncSocket::SharedBuffer> shared_buffer_; | |
| 158 scoped_ptr<MockSyncSocket> local_socket_; | |
| 159 MockSyncSocket* foreign_socket_; | |
| 160 | |
| 161 // Audio bus wrapping the shared memory from the renderer. | |
| 162 scoped_ptr<media::AudioBus> audio_track_bus_; | |
| 163 | |
| 164 // Used for synchronization of sent/received buffers. | |
| 165 uint32* buffer_index_; | |
| 166 }; | |
| 167 | |
| 168 namespace { | |
| 169 | |
| 170 // Supported speech recognition audio parameters. | |
| 171 const int kSpeechRecognitionSampleRate = 16000; | |
| 172 const int kSpeechRecognitionFramesPerBuffer = 1600; | |
| 173 | |
| 174 // Input audio format. | |
| 175 const media::AudioParameters::Format kInputFormat = | |
| 176 media::AudioParameters::AUDIO_PCM_LOW_LATENCY; | |
| 177 const media::ChannelLayout kInputChannelLayout = media::CHANNEL_LAYOUT_MONO; | |
| 178 const int kInputChannels = 1; | |
| 179 const int kInputBitsPerSample = 16; | |
| 180 | |
| 181 // Output audio format. | |
| 182 const media::AudioParameters::Format kOutputFormat = | |
| 183 media::AudioParameters::AUDIO_PCM_LOW_LATENCY; | |
| 184 const media::ChannelLayout kOutputChannelLayout = media::CHANNEL_LAYOUT_STEREO; | |
| 185 const int kOutputChannels = 2; | |
| 186 const int kOutputBitsPerSample = 16; | |
| 187 | |
| 188 } // namespace | |
| 189 | |
| 190 class SpeechRecognitionAudioSourceProviderTest : public testing::Test { | |
| 191 public: | |
| 192 SpeechRecognitionAudioSourceProviderTest() { } | |
| 193 | |
| 194 // Initializes the producer and consumer with specified audio parameters. | |
| 195 // Returns the minimal number of input audio buffers which need to be captured | |
| 196 // before they get sent to the consumer. | |
| 197 uint32 Initialize(int input_sample_rate, | |
| 198 int input_frames_per_buffer, | |
| 199 int output_sample_rate, | |
| 200 int output_frames_per_buffer) { | |
| 201 // Audio Environment setup. | |
| 202 source_params_.Reset(kInputFormat, | |
| 203 kInputChannelLayout, | |
| 204 kInputChannels, | |
| 205 input_sample_rate, | |
| 206 kInputBitsPerSample, | |
| 207 input_frames_per_buffer); | |
| 208 sink_params_.Reset(kOutputFormat, | |
| 209 kOutputChannelLayout, | |
| 210 kOutputChannels, | |
| 211 output_sample_rate, | |
| 212 kOutputBitsPerSample, | |
| 213 output_frames_per_buffer); | |
| 214 source_data_.reset(new int16[input_frames_per_buffer * kInputChannels]); | |
| 215 | |
| 216 // Prepare the track and audio source. | |
| 217 blink::WebMediaStreamTrack blink_track; | |
| 218 PrepareBlinkTrackOfType(MEDIA_DEVICE_AUDIO_CAPTURE, &blink_track); | |
| 219 | |
| 220 // Get the native track from the blink track and initialize. | |
| 221 native_track_ = | |
| 222 static_cast<WebRtcLocalAudioTrack*>(blink_track.extraData()); | |
| 223 native_track_->OnSetFormat(source_params_); | |
| 224 | |
| 225 // Create and initialize the consumer. | |
| 226 recognizer_.reset(new FakeSpeechRecognizer()); | |
| 227 base::SharedMemoryHandle foreign_memory_handle; | |
| 228 recognizer_->Initialize(blink_track, sink_params_, &foreign_memory_handle); | |
| 229 | |
| 230 // Create the producer. | |
| 231 audio_source_provider_.reset(new SpeechRecognitionAudioSourceProvider( | |
| 232 blink_track, sink_params_, foreign_memory_handle, | |
| 233 recognizer_->foreign_socket(), | |
| 234 base::Bind(&SpeechRecognitionAudioSourceProviderTest::StoppedCallback, | |
| 235 base::Unretained(this)))); | |
| 236 | |
| 237 // Return number of buffers needed to trigger resampling and consumption. | |
| 238 return static_cast<uint32>(std::ceil( | |
| 239 static_cast<double>(output_frames_per_buffer * input_sample_rate) / | |
| 240 (input_frames_per_buffer * output_sample_rate))); | |
| 241 } | |
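As a quick sanity check on the value returned above: for the 44.1 kHz / 441-frame input and 16 kHz / 1600-frame output used by most tests below, ceil(1600 * 44100 / (441 * 16000)) = ceil(10.0) = 10, i.e. ten 10 ms capture buffers fill one 100 ms recognition buffer. The standalone snippet below repeats the same arithmetic; BuffersPerNotification is an illustrative helper, not part of this CL.

```cpp
#include <cassert>
#include <cmath>
#include <cstdint>

// Same arithmetic as the return statement in Initialize() above.
uint32_t BuffersPerNotification(int in_rate, int in_frames, int out_rate,
                                int out_frames) {
  return static_cast<uint32_t>(std::ceil(
      static_cast<double>(out_frames * in_rate) / (in_frames * out_rate)));
}

int main() {
  // 10 ms buffers at 44.1 kHz feeding 100 ms buffers at 16 kHz.
  assert(BuffersPerNotification(44100, 441, 16000, 1600) == 10u);
  // 10 ms buffers at 8 kHz feeding 100 ms buffers at 16 kHz.
  assert(BuffersPerNotification(8000, 80, 16000, 1600) == 10u);
  return 0;
}
```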
| 242 | |
| 243 // Mock callback expected to be called when the track is stopped. | |
| 244 MOCK_METHOD0(StoppedCallback, void()); | |
| 245 | |
| 246 protected: | |
| 247 // Prepares a blink track of a given MediaStreamType and attaches the native | |
| 248 // track which can be used to capture audio data and pass it to the producer. | |
| 249 static void PrepareBlinkTrackOfType( | |
| 250 const MediaStreamType device_type, | |
| 251 blink::WebMediaStreamTrack* blink_track) { | |
| 252 StreamDeviceInfo device_info(device_type, "Mock device", | |
| 253 "mock_device_id"); | |
| 254 MockMediaConstraintFactory constraint_factory; | |
| 255 const blink::WebMediaConstraints constraints = | |
| 256 constraint_factory.CreateWebMediaConstraints(); | |
| 257 scoped_refptr<WebRtcAudioCapturer> capturer( | |
| 258 WebRtcAudioCapturer::CreateCapturer(-1, device_info, constraints, NULL, | |
| 259 NULL)); | |
| 260 scoped_refptr<WebRtcLocalAudioTrackAdapter> adapter( | |
| 261 WebRtcLocalAudioTrackAdapter::Create(std::string(), NULL)); | |
| 262 scoped_ptr<WebRtcLocalAudioTrack> native_track( | |
| 263 new WebRtcLocalAudioTrack(adapter.get(), capturer, NULL)); | |
| 264 blink::WebMediaStreamSource blink_audio_source; | |
| 265 blink_audio_source.initialize(base::UTF8ToUTF16("dummy_source_id"), | |
| 266 blink::WebMediaStreamSource::TypeAudio, | |
| 267 base::UTF8ToUTF16("dummy_source_name")); | |
| 268 MediaStreamSource::SourceStoppedCallback cb; | |
| 269 blink_audio_source.setExtraData( | |
| 270 new MediaStreamAudioSource(-1, device_info, cb, NULL)); | |
| 271 blink_track->initialize(blink::WebString::fromUTF8("dummy_track"), | |
| 272 blink_audio_source); | |
| 273 blink_track->setExtraData(native_track.release()); | |
| 274 } | |
| 275 | |
| 276 // Emulates an audio capture device capturing data from the source. | |
| 277 inline void CaptureAudio(const uint32 buffers) { | |
| 278 for (uint32 i = 0; i < buffers; ++i) | |
| 279 native_track_->Capture(source_data_.get(), | |
| 280 base::TimeDelta::FromMilliseconds(0), 1, false, | |
| 281 false); | |
| 282 } | |
| 283 | |
| 284 // Used to simulate a problem with sockets. | |
| 285 void SetFailureModeOnForeignSocket(bool in_failure_mode) { | |
| 286 recognizer_->foreign_socket()->SetFailureMode(in_failure_mode); | |
| 287 } | |
| 288 | |
| 289 // Helper method for verifying captured audio data has been consumed. | |
| 290 inline void AssertConsumedBuffers(const uint32 buffer_index) { | |
| 291 ASSERT_EQ(buffer_index, recognizer_->buffer_index()); | |
| 292 } | |
| 293 | |
| 294 // Helper method for providing audio data to producer and verifying it was | |
| 295 // consumed on the recognizer. | |
| 296 inline void CaptureAudioAndAssertConsumedBuffers(const uint32 buffers, | |
| 297 const uint32 buffer_index) { | |
| 298 CaptureAudio(buffers); | |
| 299 AssertConsumedBuffers(buffer_index); | |
| 300 } | |
| 301 | |
| 302 // Helper method to capture and assert consumption at different sample rates | |
| 303 // and audio buffer sizes. | |
| 304 inline void AssertConsumptionForAudioParameters( | |
| 305 const int input_sample_rate, | |
| 306 const int input_frames_per_buffer, | |
| 307 const int output_sample_rate, | |
| 308 const int output_frames_per_buffer, | |
| 309 const uint32 consumptions) { | |
| 310 const uint32 kBuffersPerNotification = | |
| 311 Initialize(input_sample_rate, input_frames_per_buffer, | |
| 312 output_sample_rate, output_frames_per_buffer); | |
| 313 AssertConsumedBuffers(0U); | |
| 314 | |
| 315 for (uint32 i = 1U; i <= consumptions; ++i) { | |
| 316 CaptureAudio(kBuffersPerNotification); | |
| 317 ASSERT_EQ(i, recognizer_->buffer_index()) | |
| 318 << "Tested at rates: " | |
| 319 << "In(" << input_sample_rate << ", " << input_frames_per_buffer | |
| 320 << ") " | |
| 321 << "Out(" << output_sample_rate << ", " << output_frames_per_buffer | |
| 322 << ")"; | |
| 323 } | |
| 324 } | |
| 325 | |
| 326 // Producer. | |
| 327 scoped_ptr<SpeechRecognitionAudioSourceProvider> audio_source_provider_; | |
| 328 | |
| 329 // Consumer. | |
| 330 scoped_ptr<FakeSpeechRecognizer> recognizer_; | |
| 331 | |
| 332 // Audio related members. | |
| 333 scoped_ptr<int16[]> source_data_; | |
| 334 media::AudioParameters source_params_; | |
| 335 media::AudioParameters sink_params_; | |
| 336 WebRtcLocalAudioTrack* native_track_; | |
| 337 }; | |
| 338 | |
| 339 // Not all types of tracks are supported. This test checks if that policy is | |
| 340 // implemented correctly. | |
| 341 TEST_F(SpeechRecognitionAudioSourceProviderTest, CheckIsSupportedAudioTrack) { | |
| 342 typedef std::map<MediaStreamType, bool> SupportedTrackPolicy; | |
| 343 | |
| 344 // This test must be aligned with the policy of supported tracks. | |
| 345 SupportedTrackPolicy p; | |
| 346 p[MEDIA_NO_SERVICE] = false; | |
| 347 p[MEDIA_DEVICE_AUDIO_CAPTURE] = true; // The only one supported for now. | |
| 348 p[MEDIA_DEVICE_VIDEO_CAPTURE] = false; | |
| 349 p[MEDIA_TAB_AUDIO_CAPTURE] = false; | |
| 350 p[MEDIA_TAB_VIDEO_CAPTURE] = false; | |
| 351 p[MEDIA_DESKTOP_VIDEO_CAPTURE] = false; | |
| 352 p[MEDIA_LOOPBACK_AUDIO_CAPTURE] = false; | |
| 353 p[MEDIA_DEVICE_AUDIO_OUTPUT] = false; | |
| 354 | |
| 355 // Ensure this test gets updated along with the |content::MediaStreamType| enum. | |
| 356 EXPECT_EQ(NUM_MEDIA_TYPES, p.size()); | |
| 357 | |
| 358 // Check the entire policy. | |
| 359 for (SupportedTrackPolicy::iterator it = p.begin(); it != p.end(); ++it) { | |
| 360 blink::WebMediaStreamTrack blink_track; | |
| 361 PrepareBlinkTrackOfType(it->first, &blink_track); | |
| 362 ASSERT_EQ( | |
| 363 it->second, | |
| 364 SpeechRecognitionAudioSourceProvider::IsSupportedTrack(blink_track)); | |
| 365 } | |
| 366 } | |
| 367 | |
| 368 // Checks if the producer can support the listed range of input sample rates | |
| 369 // and associated buffer sizes. | |
| 370 TEST_F(SpeechRecognitionAudioSourceProviderTest, RecognizerNotifiedOnSocket) { | |
| 371 const size_t kNumAudioParamTuples = 22; | |
| 372 const int kAudioParams[kNumAudioParamTuples][2] = { | |
|
tommi (sloooow) - chröme
2014/09/24 09:52:00
add 24000?
burnik
2014/09/24 11:54:22
Done.
| |
| 373 {8000, 80}, {8000, 800}, {16000, 160}, {16000, 1600}, | |
| 374 {32000, 320}, {32000, 3200}, {44100, 441}, {44100, 4410}, | |
| 375 {48000, 480}, {48000, 4800}, {96000, 960}, {96000, 9600}, | |
| 376 {11025, 111}, {11025, 1103}, {22050, 221}, {22050, 2205}, | |
| 377 {88200, 882}, {88200, 8820}, {176400, 1764}, {176400, 17640}, | |
| 378 {192000, 1920}, {192000, 19200}}; | |
| 379 | |
| 380 // Check all listed tuples of input sample rates and buffer sizes. | |
| 381 for (size_t i = 0; i < kNumAudioParamTuples; ++i) { | |
| 382 AssertConsumptionForAudioParameters( | |
| 383 kAudioParams[i][0], kAudioParams[i][1], | |
| 384 kSpeechRecognitionSampleRate, kSpeechRecognitionFramesPerBuffer, 3U); | |
| 385 } | |
| 386 } | |
| 387 | |
| 388 // Checks that the input data is getting resampled to the target sample rate. | |
| 389 TEST_F(SpeechRecognitionAudioSourceProviderTest, AudioDataIsResampledOnSink) { | |
| 390 EXPECT_GE(kInputChannels, 1); | |
| 391 EXPECT_GE(kOutputChannels, 1); | |
| 392 | |
| 393 // Input audio is sampled at 44.1 kHz with data chunks of 10 ms. The desired | |
| 394 // output corresponds to the speech recognition engine requirements: 16 kHz | |
| 395 // with 100 ms chunks (1600 frames per buffer). | |
| 396 const uint32 kBuffersPerNotification = Initialize(44100, 441, 16000, 1600); | |
| 397 | |
| 398 // Fill audio input frames with 0, 1, 2, 3, ..., 440. | |
| 399 const uint32 kSourceDataLength = 441 * kInputChannels; | |
| 400 for (uint32 i = 0; i < kSourceDataLength; ++i) | |
|
tommi (sloooow) - chröme
2014/09/24 09:51:59
{}
burnik
2014/09/24 11:54:22
Done.
| |
| 401 for (int c = 0; c < kInputChannels; ++c) | |
| 402 source_data_[i * kInputChannels + c] = i; | |
| 403 | |
| 404 // Prepare sink audio bus and data for rendering. | |
| 405 media::AudioBus* sink_bus = recognizer_->audio_bus(); | |
| 406 const uint32 kSinkDataLength = 1600 * kOutputChannels; | |
| 407 int16 sink_data[kSinkDataLength]; | |
|
tommi (sloooow) - chröme
2014/09/24 09:51:59
nit: = {0};
burnik
2014/09/24 11:54:22
Done.
Does this array init-to-zero on stack work f
| |
| 408 | |
| 409 // Render the audio data from the recognizer. | |
| 410 sink_bus->ToInterleaved(sink_bus->frames(), | |
| 411 sink_params_.bits_per_sample() / 8, sink_data); | |
| 412 | |
| 413 // Checking only a fraction of the sink frames. | |
| 414 const uint32 kNumFramesToTest = 12; | |
| 415 | |
| 416 // Check all channels are zeroed out before we trigger resampling. | |
| 417 for (uint32 i = 0; i < kNumFramesToTest; ++i) | |
|
tommi (sloooow) - chröme
2014/09/24 09:51:59
{}
burnik
2014/09/24 11:54:22
Done.
| |
| 418 for (int c = 0; c < kOutputChannels; ++c) | |
| 419 ASSERT_EQ(0, sink_data[i * kOutputChannels + c]); | |
|
tommi (sloooow) - chröme
2014/09/24 09:51:59
EXPECT_EQ?
burnik
2014/09/24 11:54:22
Done.
| |
| 420 | |
| 421 // Trigger the source provider to resample the input data. | |
| 422 AssertConsumedBuffers(0U); | |
| 423 CaptureAudioAndAssertConsumedBuffers(kBuffersPerNotification, 1U); | |
| 424 | |
| 425 // Render the audio data from the recognizer. | |
| 426 sink_bus->ToInterleaved(sink_bus->frames(), | |
| 427 sink_params_.bits_per_sample() / 8, sink_data); | |
| 428 | |
| 429 // Expected frames of resampled data, derived from |source_data_|. | |
| 430 const int16 kExpectedData[kNumFramesToTest] = {0, 2, 5, 8, 11, 13, | |
| 431 16, 19, 22, 24, 27, 30}; | |
| 432 | |
| 433 // Check all channels have the same resampled data. | |
| 434 for (uint32 i = 0; i < kNumFramesToTest; ++i) | |
|
tommi (sloooow) - chröme
2014/09/24 09:51:59
{}
burnik
2014/09/24 11:54:22
Done.
| |
| 435 for (int c = 0; c < kOutputChannels; ++c) | |
| 436 ASSERT_EQ(kExpectedData[i], sink_data[i * kOutputChannels + c]); | |
|
tommi (sloooow) - chröme
2014/09/24 09:51:59
EXPECT_EQ?
burnik
2014/09/24 11:54:22
Done.
| |
| 437 } | |
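Where |kExpectedData| comes from: the source ramp stores each frame's own index as its sample value, and converting 44.1 kHz to 16 kHz maps output frame i to roughly input position i * 44100 / 16000 = i * 2.75625. Truncating that position happens to reproduce the table above for the first 12 frames; the exact values remain a property of the resampler, so the snippet below only illustrates the mapping.

```cpp
#include <cstdint>
#include <cstdio>

int main() {
  // Output frame i of the 16 kHz sink maps to input position i * 44100 / 16000
  // in the 44.1 kHz ramp, whose sample value equals its own frame index.
  for (uint32_t i = 0; i < 12; ++i) {
    int16_t expected = static_cast<int16_t>(i * 44100 / 16000);  // Truncates.
    std::printf("%d ", expected);
  }
  std::printf("\n");  // Prints: 0 2 5 8 11 13 16 19 22 24 27 30
  return 0;
}
```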
| 438 | |
| 439 // Checks that the producer does not misbehave when a socket failure occurs. | |
| 440 TEST_F(SpeechRecognitionAudioSourceProviderTest, SyncSocketFailsSendingData) { | |
| 441 const uint32 kBuffersPerNotification = Initialize(44100, 441, 16000, 1600); | |
| 442 // Start with no problems on the socket. | |
| 443 AssertConsumedBuffers(0U); | |
| 444 CaptureAudioAndAssertConsumedBuffers(kBuffersPerNotification, 1U); | |
| 445 | |
| 446 // A failure occurs (socket cannot send). | |
| 447 SetFailureModeOnForeignSocket(true); | |
| 448 CaptureAudioAndAssertConsumedBuffers(kBuffersPerNotification, 1U); | |
| 449 } | |
| 450 | |
| 451 // Checks that an OnStoppedCallback is issued when the track is stopped. | |
| 452 TEST_F(SpeechRecognitionAudioSourceProviderTest, OnReadyStateChangedOccurred) { | |
| 453 const uint32 kBuffersPerNotification = Initialize(44100, 441, 16000, 1600); | |
| 454 AssertConsumedBuffers(0U); | |
| 455 CaptureAudioAndAssertConsumedBuffers(kBuffersPerNotification, 1U); | |
| 456 EXPECT_CALL(*this, StoppedCallback()).Times(1); | |
| 457 | |
| 458 native_track_->Stop(); | |
| 459 CaptureAudioAndAssertConsumedBuffers(kBuffersPerNotification, 1U); | |
| 460 } | |
| 461 | |
| 462 } // namespace content | |