Chromium Code Reviews
| 1 // Copyright 2014 The Chromium Authors. All rights reserved. | |
| 2 // Use of this source code is governed by a BSD-style license that can be | |
| 3 // found in the LICENSE file. | |
| 4 | |
| 5 #include "content/renderer/speech_recognition_audio_source_provider.h" | |
| 6 | |
| 7 #include "base/logging.h" | |
| 8 #include "base/strings/utf_string_conversions.h" | |
| 9 #include "content/renderer/media/media_stream_audio_source.h" | |
| 10 #include "content/renderer/media/mock_media_constraint_factory.h" | |
| 11 #include "content/renderer/media/rtc_media_constraints.h" | |
| 12 #include "content/renderer/media/webrtc/mock_peer_connection_dependency_factory.h" | |
| 13 #include "content/renderer/media/webrtc/webrtc_local_audio_track_adapter.h" | |
| 14 #include "content/renderer/media/webrtc_audio_capturer.h" | |
| 15 #include "content/renderer/media/webrtc_audio_device_impl.h" | |
| 16 #include "content/renderer/media/webrtc_local_audio_source_provider.h" | |
| 17 #include "content/renderer/media/webrtc_local_audio_track.h" | |
| 18 #include "media/audio/audio_parameters.h" | |
| 19 #include "media/base/audio_bus.h" | |
| 20 #include "media/base/audio_capturer_source.h" | |
| 21 #include "testing/gmock/include/gmock/gmock.h" | |
| 22 #include "testing/gtest/include/gtest/gtest.h" | |
| 23 #include "third_party/WebKit/public/platform/WebMediaStreamTrack.h" | |
| 24 | |
| 25 namespace content { | |
| 26 | |
| 27 // Mocked out sockets used for Send/Receive. | |
| 28 // Data is written and read from a shared buffer used as a FIFO and there is | |
| 29 // no blocking. |OnSendCB| is used to trigger a |Receive| on the other socket. | |
| 30 class MockSyncSocket : public base::SyncSocket { | |
| 31 public: | |
| 32 // This allows for 2 requests in queue between the |MockSyncSocket|s. | |
| 33 static const int kSharedBufferSize = 8; | |
| 34 // Buffer to be shared between two |MockSyncSocket|s. Allocated on heap. | |
| 35 struct SharedBuffer { | |
| 36 SharedBuffer() : start(0), length(0) { } | |
| 37 | |
| 38 uint8 data[kSharedBufferSize]; | |
| 39 size_t start; | |
| 40 size_t length; | |
| 41 }; | |
| 42 | |
| 43 // Callback used for pairing an A.Send() with B.Receive() without blocking. | |
| 44 typedef base::Callback<void()> OnSendCB; | |
| 45 | |
| 46 explicit MockSyncSocket(SharedBuffer* shared_buffer); | |
| 47 MockSyncSocket(SharedBuffer* shared_buffer, const OnSendCB& on_send_cb); | |
| 48 | |
| 49 virtual size_t Send(const void* buffer, size_t length) OVERRIDE; | |
| 50 virtual size_t Receive(void* buffer, size_t length) OVERRIDE; | |
| 51 | |
| 52 // When |in_failure_mode_| == true, the socket fails to send. | |
| 53 void SetFailureMode(bool in_failure_mode) { | |
| 54 in_failure_mode_ = in_failure_mode; | |
| 55 } | |
| 56 | |
| 57 private: | |
| 58 SharedBuffer* buffer_; | |
| 59 const OnSendCB on_send_cb_; | |
| 60 bool in_failure_mode_; | |
| 61 }; | |
| 62 | |
| 63 MockSyncSocket::MockSyncSocket(SharedBuffer* buffer) | |
| 64 : buffer_(buffer), in_failure_mode_(false) {} | |
| 65 | |
| 66 MockSyncSocket::MockSyncSocket(SharedBuffer* buffer, const OnSendCB& on_send_cb) | |
| 67 : buffer_(buffer), on_send_cb_(on_send_cb), in_failure_mode_(false) {} | |
| 68 | |
| 69 size_t MockSyncSocket::Send(const void* buffer, size_t length) { | |
| 70 if (in_failure_mode_) return 0; | |
|
no longer working on chromium
2014/09/16 12:44:07
new line for the if ()
burnik
2014/09/16 19:10:23
Done.
| |
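Not part of the review thread itself, but a minimal sketch of the style fix being asked for (the early return moved onto its own line):

```cpp
size_t MockSyncSocket::Send(const void* buffer, size_t length) {
  if (in_failure_mode_)
    return 0;
  // ... copy |buffer| into the shared FIFO as below ...
}
```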
| 71 uint8* b = static_cast<uint8*>(const_cast<void*>(buffer)); | |
| 72 for (size_t i = 0; i < length; i++, buffer_->length++) | |
| 73 buffer_->data[buffer_->start + buffer_->length] = b[i]; | |
| 74 on_send_cb_.Run(); | |
|
no longer working on chromium
2014/09/16 12:44:07
add an empty line before on_send_cb_.Run();
burnik
2014/09/16 19:10:24
Done.
| |
| 75 return length; | |
| 76 } | |
| 77 | |
| 78 size_t MockSyncSocket::Receive(void* buffer, size_t length) { | |
| 79 uint8* b = static_cast<uint8*>(const_cast<void*>(buffer)); | |
| 80 for (size_t i = buffer_->start; i < buffer_->length; i++, buffer_->start++) | |
| 81 b[i] = buffer_->data[buffer_->start]; | |
| 82 // Since buffer is used atomically, we can reset the buffer indices here. | |
|
no longer working on chromium
2014/09/16 12:44:07
empty line.
burnik
2014/09/16 19:10:24
Done.
| |
| 83 buffer_->start = buffer_->length = 0; | |
| 84 return length; | |
| 85 } | |
| 86 | |
| 87 //////////////////////////////////////////////////////////////////////////////// | |
| 88 | |
| 89 class FakeSpeechRecognizer { | |
| 90 public: | |
| 91 FakeSpeechRecognizer() : is_responsive_(true) {} | |
| 92 ~FakeSpeechRecognizer() {} | |
| 93 | |
| 94 void Initialize( | |
| 95 const blink::WebMediaStreamTrack& track, | |
| 96 const media::AudioParameters& sink_params, | |
| 97 const SpeechRecognitionAudioSourceProvider::OnStoppedCB& on_stopped_cb); | |
| 98 | |
| 99 // TODO(burnik): Move from the recognizer to test. | |
| 100 SpeechRecognitionAudioSourceProvider* SourceProvider(); | |
| 101 | |
| 102 // Emulates a single iteration of a thread receiving on the socket. | |
| 103 virtual void EmulateReceiveThreadLoopIteration(); | |
|
no longer working on chromium
2014/09/16 12:44:07
why is it virtual?
burnik
2014/09/16 19:10:24
Legacy. Removed.
On 2014/09/16 12:44:07, xians1 wr
| |
| 104 | |
| 105 // Used to simulate an unresponsive behaviour of the consumer. | |
| 106 void SimulateResponsiveness(bool is_responsive) { | |
| 107 is_responsive_ = is_responsive; | |
| 108 } | |
| 109 // Used to simulate a problem with sockets. | |
|
burnik
2014/09/16 19:10:24
Added newline before comment.
| |
| 110 void SetFailureModeOnForeignSocket(bool in_failure_mode) { | |
| 111 DCHECK(foreign_socket_.get()); | |
|
no longer working on chromium
2014/09/16 12:44:07
remove this DCHECK since you derefer the pointer b
burnik
2014/09/16 19:10:24
Done.
| |
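As a sketch, the method after the requested change; the DCHECK adds nothing because a null |foreign_socket_| would crash on the very next line anyway:

```cpp
void SetFailureModeOnForeignSocket(bool in_failure_mode) {
  foreign_socket_->SetFailureMode(in_failure_mode);
}
```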
| 112 foreign_socket_->SetFailureMode(in_failure_mode); | |
| 113 } | |
| 114 | |
| 115 uint32 buffer_index() { return *shared_buffer_index_; } | |
| 116 media::AudioBus* audio_bus() const { return audio_track_bus_.get(); } | |
| 117 | |
| 118 private: | |
| 119 bool is_responsive_; | |
| 120 // Shared memory for the audio and synchronization. | |
| 121 scoped_ptr<base::SharedMemory> shared_memory_; | |
| 122 | |
| 123 // Fake sockets shared buffer. | |
| 124 scoped_ptr<MockSyncSocket::SharedBuffer> shared_buffer_; | |
| 125 scoped_ptr<MockSyncSocket> local_socket_; | |
| 126 scoped_ptr<MockSyncSocket> foreign_socket_; | |
| 127 | |
| 128 // Audio bus wrapping the shared memory from the renderer. | |
| 129 scoped_ptr<media::AudioBus> audio_track_bus_; | |
| 130 | |
| 131 uint32* shared_buffer_index_; | |
| 132 // Producer. TODO(burnik): this should be outside the recognizer. | |
| 133 scoped_ptr<SpeechRecognitionAudioSourceProvider> audio_source_provider_; | |
| 134 }; | |
| 135 | |
| 136 void FakeSpeechRecognizer::Initialize( | |
| 137 const blink::WebMediaStreamTrack& track, | |
| 138 const media::AudioParameters& sink_params, | |
| 139 const SpeechRecognitionAudioSourceProvider::OnStoppedCB& on_stopped_cb) { | |
| 140 // Shared memory is allocated, mapped and shared. | |
| 141 uint32 shared_memory_size = sizeof(media::AudioInputBufferParameters) + | |
| 142 media::AudioBus::CalculateMemorySize(sink_params); | |
| 143 shared_memory_.reset(new base::SharedMemory()); | |
| 144 | |
| 145 ASSERT_TRUE(shared_memory_->CreateAndMapAnonymous(shared_memory_size)) | |
| 146 << "Failed to create shared memory"; | |
| 147 | |
| 148 base::SharedMemoryHandle foreign_memory_handle; | |
| 149 ASSERT_TRUE(shared_memory_->ShareToProcess(base::GetCurrentProcessHandle(), | |
| 150 &foreign_memory_handle)) | |
| 151 << "Failed to share memory"; | |
| 152 | |
| 153 media::AudioInputBuffer* buffer = | |
| 154 static_cast<media::AudioInputBuffer*>(shared_memory_->memory()); | |
| 155 audio_track_bus_ = media::AudioBus::WrapMemory(sink_params, buffer->audio); | |
| 156 | |
| 157 // Reference to the counter used to synchronize. | |
| 158 shared_buffer_index_ = &(buffer->params.size); | |
| 159 *shared_buffer_index_ = 0U; | |
| 160 | |
| 161 // Create a shared buffer for the |MockSyncSocket|s. | |
| 162 shared_buffer_.reset(new MockSyncSocket::SharedBuffer()); | |
| 163 | |
| 164 // Local socket will receive signals from the producer. | |
| 165 local_socket_.reset(new MockSyncSocket(shared_buffer_.get())); | |
| 166 | |
| 167 // We automatically trigger a Receive when data is sent over the socket. | |
| 168 foreign_socket_.reset(new MockSyncSocket( | |
| 169 shared_buffer_.get(), | |
| 170 base::Bind(&FakeSpeechRecognizer::EmulateReceiveThreadLoopIteration, | |
| 171 base::Unretained(this)))); | |
| 172 | |
| 173 // This is usually done to pair the sockets. Here it's not effective. | |
| 174 base::SyncSocket::CreatePair(local_socket_.get(), foreign_socket_.get()); | |
| 175 | |
| 176 // Create the producer. TODO(burnik): move out of the recognizer. | |
| 177 audio_source_provider_.reset(new SpeechRecognitionAudioSourceProvider( | |
| 178 track, sink_params, foreign_memory_handle, foreign_socket_.get(), | |
| 179 on_stopped_cb)); | |
| 180 } | |
| 181 | |
| 182 // TODO(burnik): Remove from the recognizer. | |
| 183 SpeechRecognitionAudioSourceProvider* FakeSpeechRecognizer::SourceProvider() { | |
| 184 return audio_source_provider_.get(); | |
| 185 } | |
| 186 | |
| 187 // Emulates the receive on the socket. This would normally be done on a | |
| 188 // receiving thread's loop on the browser. | |
| 189 void FakeSpeechRecognizer::EmulateReceiveThreadLoopIteration() { | |
|
no longer working on chromium
2014/09/16 12:44:07
nit, just inline the implementation in line 103
burnik
2014/09/16 19:10:24
Done.
| |
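A rough sketch of the suggested shape, folding in the earlier "why is it virtual?" answer as well: the method declared at line 103 becomes non-virtual and is defined inline in the class declaration:

```cpp
// Emulates a single iteration of a thread receiving on the socket. This
// would normally run on a receiving thread's loop in the browser.
void EmulateReceiveThreadLoopIteration() {
  // When not responsive, do nothing, as if the process were busy.
  if (!is_responsive_)
    return;
  local_socket_->Receive(shared_buffer_index_, sizeof(*shared_buffer_index_));
  // Notify the producer that the audio buffer has been consumed.
  (*shared_buffer_index_)++;
}
```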
| 190 // When not responsive do nothing as if the process is busy. | |
| 191 if (!is_responsive_) return; | |
| 192 local_socket_->Receive(shared_buffer_index_, sizeof(*shared_buffer_index_)); | |
| 193 // Notify the producer that the audio buffer has been consumed. | |
| 194 (*shared_buffer_index_)++; | |
| 195 } | |
| 196 | |
| 197 //////////////////////////////////////////////////////////////////////////////// | |
| 198 | |
| 199 // Input audio format | |
|
no longer working on chromium
2014/09/16 12:44:07
nit, end with period
burnik
2014/09/16 19:10:23
Done.
| |
| 200 static const media::AudioParameters::Format kInputFormat = | |
|
no longer working on chromium
2014/09/16 12:44:07
put all these declaration under anonymous namespac
burnik
2014/09/16 19:10:24
Done.
| |
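A minimal sketch of the requested change: the file-scope constants get internal linkage by moving them into an unnamed namespace (only the first few are shown):

```cpp
namespace {

// Input audio format.
const media::AudioParameters::Format kInputFormat =
    media::AudioParameters::AUDIO_PCM_LOW_LATENCY;
const media::ChannelLayout kInputChannelLayout = media::CHANNEL_LAYOUT_MONO;
const int kInputSampleRate = 44100;
// ... remaining input/output constants and kBuffersPerNotification ...

}  // namespace
```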
| 201 media::AudioParameters::AUDIO_PCM_LOW_LATENCY; | |
| 202 const media::ChannelLayout kInputChannelLayout = media::CHANNEL_LAYOUT_MONO; | |
| 203 const int kInputChannels = 1; | |
| 204 const int kInputSampleRate = 44100; | |
| 205 const int kInputBitsPerSample = 16; | |
| 206 const int kInputFramesPerBuffer = 441; | |
| 207 | |
| 208 // Output audio format | |
| 209 const media::AudioParameters::Format kOutputFormat = | |
| 210 media::AudioParameters::AUDIO_PCM_LOW_LATENCY; | |
| 211 const media::ChannelLayout kOutputChannelLayout = media::CHANNEL_LAYOUT_STEREO; | |
| 212 const int kOutputChannels = 2; | |
| 213 const int kOutputSampleRate = 16000; | |
| 214 const int kOutputBitsPerSample = 16; | |
| 215 const int kOutputFramesPerBuffer = 1600; | |
| 216 | |
| 217 // Minimal number of buffers which trigger a single SyncSocket transfer. | |
| 218 const size_t kBuffersPerNotification = | |
| 219 (kOutputFramesPerBuffer * kInputSampleRate) / | |
| 220 (kInputFramesPerBuffer * kOutputSampleRate); | |
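For reference, with the constants above this evaluates to (1600 * 44100) / (441 * 16000) = 10, i.e. ten captured input buffers are needed to fill one output buffer and trigger a single socket notification.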
| 221 | |
| 222 //////////////////////////////////////////////////////////////////////////////// | |
| 223 | |
| 224 class SpeechRecognitionAudioSourceProviderTest : public testing::Test { | |
| 225 public: | |
| 226 SpeechRecognitionAudioSourceProviderTest() {} | |
| 227 | |
| 228 // Mock for error callback. | |
|
burnik
2014/09/16 19:10:24
Deprecated. It's mock for when the track is stoppe
| |
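Judging from the reply, the intent is roughly a comment like the following (hypothetical wording):

```cpp
// Mock for the callback invoked when the source audio track is stopped.
MOCK_METHOD0(StoppedCallback, void());
```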
| 229 MOCK_METHOD0(StoppedCallback, void()); | |
| 230 | |
| 231 // testing::Test methods. | |
| 232 virtual void SetUp() OVERRIDE { | |
|
no longer working on chromium
2014/09/16 12:44:06
just do these SetUp in the constructor, and use th
burnik
2014/09/16 19:10:23
Ok, can you provide an example?
On 2014/09/16 12:4
| |
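Not from the thread itself, but a rough sketch of what the reviewer is suggesting: initialize the audio parameters in the fixture's constructor rather than in SetUp(), reusing the same Reset() calls:

```cpp
SpeechRecognitionAudioSourceProviderTest() {
  source_params_.Reset(kInputFormat, kInputChannelLayout, kInputChannels,
                       kInputSampleRate, kInputBitsPerSample,
                       kInputFramesPerBuffer);
  sink_params_.Reset(kOutputFormat, kOutputChannelLayout, kOutputChannels,
                     kOutputSampleRate, kOutputBitsPerSample,
                     kOutputFramesPerBuffer);
  // The track, recognizer and source provider setup could move here too.
}
```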
| 233 // Audio Environment setup. | |
| 234 source_params_.Reset(kInputFormat, kInputChannelLayout, kInputChannels, | |
| 235 kInputSampleRate, kInputBitsPerSample, | |
| 236 kInputFramesPerBuffer); | |
| 237 | |
| 238 sink_params_.Reset(kOutputFormat, kOutputChannelLayout, kOutputChannels, | |
| 239 kOutputSampleRate, kOutputBitsPerSample, | |
| 240 kOutputFramesPerBuffer); | |
| 241 | |
| 242 source_data_length_ = | |
| 243 source_params_.frames_per_buffer() * source_params_.channels(); | |
| 244 source_data_.reset(new int16[source_data_length_]); | |
| 245 | |
| 246 MockMediaConstraintFactory constraint_factory; | |
| 247 scoped_refptr<WebRtcAudioCapturer> capturer( | |
| 248 WebRtcAudioCapturer::CreateCapturer( | |
| 249 -1, StreamDeviceInfo(), | |
| 250 constraint_factory.CreateWebMediaConstraints(), NULL, NULL)); | |
| 251 scoped_refptr<WebRtcLocalAudioTrackAdapter> adapter( | |
| 252 WebRtcLocalAudioTrackAdapter::Create(std::string(), NULL)); | |
| 253 native_track_ = new WebRtcLocalAudioTrack(adapter.get(), capturer, NULL); | |
| 254 native_track_->OnSetFormat(source_params_); | |
| 255 | |
| 256 blink::WebMediaStreamSource audio_source; | |
| 257 audio_source.initialize(base::UTF8ToUTF16("dummy_source_id"), | |
| 258 blink::WebMediaStreamSource::TypeAudio, | |
| 259 base::UTF8ToUTF16("dummy_source_name")); | |
| 260 blink_track_.initialize(blink::WebString::fromUTF8("audio_track"), | |
| 261 audio_source); | |
| 262 blink_track_.setExtraData(native_track_); | |
| 263 | |
| 264 // Create the consumer. | |
| 265 recognizer_ = new FakeSpeechRecognizer(); | |
| 266 recognizer_->Initialize( | |
| 267 blink_track_, sink_params_, | |
| 268 base::Bind(&SpeechRecognitionAudioSourceProviderTest::StoppedCallback, | |
| 269 base::Unretained(this))); | |
| 270 | |
| 271 // Init the producer. | |
| 272 audio_source_provider_.reset(recognizer_->SourceProvider()); | |
| 273 } | |
| 274 | |
| 275 virtual void TearDown() OVERRIDE { blink_track_.reset(); } | |
| 276 | |
| 277 protected: | |
| 278 // TODO(burnik): Recheck steps and simplify method. Try reusing in |SetUp()|. | |
| 279 static blink::WebMediaStreamTrack CreateBlinkTrackWithMediaStreamType( | |
|
burnik
2014/09/16 19:10:24
This method is now refactored, simplified and reus
| |
| 280 const MediaStreamType device_type) { | |
| 281 MockMediaConstraintFactory constraint_factory; | |
| 282 | |
| 283 MediaStreamSource::SourceStoppedCallback cb; | |
| 284 | |
| 285 StreamDeviceInfo device_info(device_type, "Mock audio device", | |
| 286 "mock_audio_device_id"); | |
| 287 WebRtcAudioDeviceImpl* device = new WebRtcAudioDeviceImpl(); | |
|
no longer working on chromium
2014/09/16 12:44:07
you are leaking this |device|.
burnik
2014/09/16 19:10:24
Removed it from test.
On 2014/09/16 12:44:07, xian
| |
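One way to read "Removed it from test": drop the raw |device| and pass NULL to the capturer, mirroring what SetUp() already does (a hedged sketch, not the actual follow-up patch):

```cpp
scoped_refptr<WebRtcAudioCapturer> capturer(
    WebRtcAudioCapturer::CreateCapturer(-1, device_info, constraints,
                                        NULL /* no WebRtcAudioDeviceImpl */,
                                        stream_audio_source.get()));
```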
| 288 scoped_ptr<MediaStreamAudioSource> stream_audio_source( | |
| 289 new MediaStreamAudioSource(-1, device_info, cb, NULL)); | |
| 290 const blink::WebMediaConstraints constraints = | |
| 291 constraint_factory.CreateWebMediaConstraints(); | |
| 292 MockPeerConnectionDependencyFactory* factory = | |
| 293 new MockPeerConnectionDependencyFactory(); | |
| 294 scoped_refptr<webrtc::AudioSourceInterface> audio_source = | |
| 295 factory->CreateLocalAudioSource(new RTCMediaConstraints(constraints)); | |
| 296 scoped_refptr<WebRtcAudioCapturer> capturer( | |
| 297 WebRtcAudioCapturer::CreateCapturer(-1, device_info, constraints, | |
| 298 device, stream_audio_source.get())); | |
| 299 scoped_refptr<WebRtcLocalAudioTrackAdapter> adapter( | |
| 300 WebRtcLocalAudioTrackAdapter::Create(std::string(), | |
| 301 audio_source.get())); | |
| 302 scoped_ptr<WebRtcLocalAudioTrack> native_track( | |
| 303 new WebRtcLocalAudioTrack(adapter.get(), capturer, NULL)); | |
| 304 | |
| 305 blink::WebMediaStreamSource blink_audio_source; | |
| 306 blink_audio_source.initialize(base::UTF8ToUTF16("dummy_source_id"), | |
| 307 blink::WebMediaStreamSource::TypeAudio, | |
| 308 base::UTF8ToUTF16("dummy_source_name")); | |
| 309 blink_audio_source.setExtraData(stream_audio_source.release()); | |
| 310 | |
| 311 blink::WebMediaStreamTrack blink_track; | |
| 312 blink_track.initialize(blink::WebString::fromUTF8("audio_track"), | |
| 313 blink_audio_source); | |
| 314 blink_track.setExtraData(native_track.release()); | |
| 315 | |
| 316 return blink_track; | |
| 317 } | |
| 318 | |
| 319 // Emulates an audio capture device capturing data from the source. | |
| 320 inline void CaptureAudio(const size_t buffers) { | |
| 321 DCHECK(native_track_); | |
| 322 for (size_t i = 0; i < buffers; ++i) | |
| 323 native_track_->Capture(source_data_.get(), | |
| 324 base::TimeDelta::FromMilliseconds(0), 1, false, | |
| 325 false); | |
| 326 } | |
| 327 | |
| 328 // Helper method to verify captured audio data has been consumed. | |
| 329 inline void AssertConsumedBuffers(const size_t buffer_index) { | |
| 330 ASSERT_EQ(buffer_index, recognizer_->buffer_index()); | |
| 331 } | |
| 332 | |
| 333 // Helper method to push audio data to producer and verify it was consumed. | |
| 334 inline void CaptureAudioAndAssertConsumedBuffers(const size_t buffers, | |
| 335 const size_t buffer_index) { | |
| 336 CaptureAudio(buffers); | |
| 337 AssertConsumedBuffers(buffer_index); | |
| 338 } | |
| 339 | |
| 340 protected: | |
| 341 // Producer. | |
| 342 scoped_ptr<SpeechRecognitionAudioSourceProvider> audio_source_provider_; | |
| 343 // Consumer. | |
| 344 FakeSpeechRecognizer* recognizer_; | |
| 345 // Audio related members. | |
|
burnik
2014/09/16 19:10:24
Removed unnecessary members.
| |
| 346 size_t source_data_length_; | |
| 347 media::AudioParameters source_params_; | |
| 348 scoped_ptr<int16[]> source_data_; | |
| 349 size_t sink_data_length_; | |
| 350 media::AudioParameters sink_params_; | |
| 351 blink::WebMediaStreamTrack blink_track_; | |
| 352 WebRtcLocalAudioTrack* native_track_; | |
| 353 }; | |
| 354 | |
| 355 //////////////////////////////////////////////////////////////////////////////// | |
| 356 //////////////////////////////////////////////////////////////////////////////// | |
| 357 | |
| 358 TEST_F(SpeechRecognitionAudioSourceProviderTest, CheckAllowedAudioTrackType) { | |
| 359 typedef std::map<MediaStreamType, bool> AllowedAudioTrackSourceTypePolicy; | |
| 360 // This test must be aligned with the policy of allowed tracks. | |
| 361 AllowedAudioTrackSourceTypePolicy p; | |
| 362 p[MEDIA_NO_SERVICE] = false; | |
| 363 p[MEDIA_DEVICE_AUDIO_CAPTURE] = true; // Only one allowed for now. | |
| 364 p[MEDIA_DEVICE_VIDEO_CAPTURE] = false; | |
| 365 p[MEDIA_TAB_AUDIO_CAPTURE] = false; | |
| 366 p[MEDIA_TAB_VIDEO_CAPTURE] = false; | |
| 367 p[MEDIA_DESKTOP_VIDEO_CAPTURE] = false; | |
| 368 p[MEDIA_LOOPBACK_AUDIO_CAPTURE] = false; | |
| 369 p[MEDIA_DEVICE_AUDIO_OUTPUT] = false; | |
| 370 // Ensure this test gets updated along with |content::MediaStreamType| enum. | |
| 371 EXPECT_EQ(NUM_MEDIA_TYPES, p.size()); | |
| 372 // Check the entire policy. | |
| 373 for (AllowedAudioTrackSourceTypePolicy::iterator it = p.begin(); | |
| 374 it != p.end(); ++it) { | |
| 375 ASSERT_EQ(it->second, | |
| 376 SpeechRecognitionAudioSourceProvider::IsAllowedAudioTrack( | |
| 377 CreateBlinkTrackWithMediaStreamType(it->first))); | |
| 378 } | |
| 379 } | |
| 380 | |
| 381 TEST_F(SpeechRecognitionAudioSourceProviderTest, RecognizerNotifiedOnSocket) { | |
| 382 AssertConsumedBuffers(0U); | |
| 383 CaptureAudioAndAssertConsumedBuffers(kBuffersPerNotification, 1U); | |
| 384 CaptureAudioAndAssertConsumedBuffers(kBuffersPerNotification, 2U); | |
| 385 CaptureAudioAndAssertConsumedBuffers(kBuffersPerNotification, 3U); | |
| 386 } | |
| 387 | |
| 388 TEST_F(SpeechRecognitionAudioSourceProviderTest, AudioDataIsResampledOnSink) { | |
| 389 // fill audio input frames with 0,1,2,3,...,440 | |
|
no longer working on chromium
2014/09/16 12:44:07
all the comments start with capital letter and end
burnik
2014/09/16 19:10:24
Done.
| |
| 390 for (size_t i = 0; i < source_data_length_; ++i) source_data_[i] = i; | |
|
no longer working on chromium
2014/09/16 12:44:07
empty line
burnik
2014/09/16 19:10:24
Done.
| |
| 391 | |
| 392 const size_t num_frames_to_test = 12; | |
| 393 const size_t sink_data_length = kOutputFramesPerBuffer * kOutputChannels; | |
| 394 int16 sink_data[sink_data_length]; | |
| 395 media::AudioBus* sink_bus = recognizer_->audio_bus(); | |
| 396 | |
| 397 // Render the audio data from the recognizer. | |
| 398 sink_bus->ToInterleaved(sink_bus->frames(), | |
| 399 sink_params_.bits_per_sample() / 8, sink_data); | |
| 400 | |
| 401 // Test both channels are zeroed out before we trigger resampling. | |
| 402 for (size_t i = 0; i < num_frames_to_test; ++i) { | |
| 403 ASSERT_EQ(0, sink_data[i * 2]); | |
| 404 ASSERT_EQ(0, sink_data[i * 2 + 1]); | |
| 405 } | |
| 406 | |
| 407 // Trigger the source provider to resample the input data. | |
| 408 AssertConsumedBuffers(0U); | |
| 409 CaptureAudioAndAssertConsumedBuffers(kBuffersPerNotification, 1U); | |
| 410 | |
| 411 // Render the audio data from the recognizer. | |
| 412 sink_bus->ToInterleaved(sink_bus->frames(), | |
| 413 sink_params_.bits_per_sample() / 8, sink_data); | |
| 414 | |
| 415 // Resampled data expected frames - based on |source_data_|. | |
| 416 // Note: these values also depend on input/output audio params. | |
| 417 const int16 expected_data[num_frames_to_test] = {0, 2, 5, 8, 11, 13, | |
| 418 16, 19, 22, 24, 27, 30}; | |
| 419 | |
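A note on where these values come from: |source_data_| is the ramp 0, 1, 2, ..., and the provider downsamples from 44100 Hz to 16000 Hz, so output frame i corresponds roughly to source index i * 44100 / 16000 ≈ i * 2.76, giving 0, 2(.76), 5(.5), 8(.3), and so on. The exact integers depend on the resampler's filtering, which is why they are hard-coded here.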
| 420 // Test both channels have same resampled data. | |
| 421 for (size_t i = 0; i < num_frames_to_test; ++i) { | |
| 422 ASSERT_EQ(expected_data[i], sink_data[i * 2]); | |
| 423 ASSERT_EQ(expected_data[i], sink_data[i * 2 + 1]); | |
| 424 } | |
| 425 } | |
| 426 | |
| 427 TEST_F(SpeechRecognitionAudioSourceProviderTest, SyncSocketFailsSendingData) { | |
| 428 // (1) Start out with no problems. | |
| 429 AssertConsumedBuffers(0U); | |
| 430 CaptureAudioAndAssertConsumedBuffers(kBuffersPerNotification, 1U); | |
| 431 | |
| 432 // (2) A failure occurs (socket cannot send). | |
| 433 recognizer_->SetFailureModeOnForeignSocket(true); | |
| 434 CaptureAudioAndAssertConsumedBuffers(kBuffersPerNotification, 1U); | |
| 435 | |
| 436 // (3) Miraculasly recovered from the socket failure. | |
|
burnik
2014/09/16 19:10:24
*Miraculously -- And miraculously removed from tes
| |
| 437 recognizer_->SetFailureModeOnForeignSocket(false); | |
| 438 CaptureAudioAndAssertConsumedBuffers(kBuffersPerNotification, 2U); | |
| 439 } | |
| 440 | |
| 441 TEST_F(SpeechRecognitionAudioSourceProviderTest, OnReadyStateChangedOccured) { | |
| 442 AssertConsumedBuffers(0U); | |
| 443 CaptureAudioAndAssertConsumedBuffers(kBuffersPerNotification, 1U); | |
| 444 EXPECT_CALL( | |
| 445 *this, StoppedCallback()) | |
| 446 .Times(1); | |
| 447 | |
| 448 native_track_->Stop(); | |
| 449 CaptureAudioAndAssertConsumedBuffers(kBuffersPerNotification, 1U); | |
| 450 } | |
| 451 | |
| 452 } // namespace content | |