OLD | NEW |
1 // Copyright 2014 The Chromium Authors. All rights reserved. | 1 // Copyright 2014 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "content/renderer/media/speech_recognition_audio_sink.h" | 5 #include "content/renderer/media/speech_recognition_audio_sink.h" |
6 | 6 |
| 7 #include <stddef.h> |
| 8 #include <stdint.h> |
| 9 |
7 #include "base/bind.h" | 10 #include "base/bind.h" |
| 11 #include "base/macros.h" |
8 #include "base/strings/utf_string_conversions.h" | 12 #include "base/strings/utf_string_conversions.h" |
9 #include "content/renderer/media/media_stream_audio_source.h" | 13 #include "content/renderer/media/media_stream_audio_source.h" |
10 #include "content/renderer/media/mock_media_constraint_factory.h" | 14 #include "content/renderer/media/mock_media_constraint_factory.h" |
11 #include "content/renderer/media/webrtc/webrtc_local_audio_track_adapter.h" | 15 #include "content/renderer/media/webrtc/webrtc_local_audio_track_adapter.h" |
12 #include "content/renderer/media/webrtc_local_audio_track.h" | 16 #include "content/renderer/media/webrtc_local_audio_track.h" |
13 #include "media/audio/audio_parameters.h" | 17 #include "media/audio/audio_parameters.h" |
14 #include "media/base/audio_bus.h" | 18 #include "media/base/audio_bus.h" |
15 #include "testing/gmock/include/gmock/gmock.h" | 19 #include "testing/gmock/include/gmock/gmock.h" |
16 #include "testing/gtest/include/gtest/gtest.h" | 20 #include "testing/gtest/include/gtest/gtest.h" |
17 #include "third_party/WebKit/public/platform/WebMediaStreamTrack.h" | 21 #include "third_party/WebKit/public/platform/WebMediaStreamTrack.h" |
(...skipping 24 matching lines...) |
42 // no blocking. |OnSendCB| is used to trigger a |Receive| on the other socket. | 46 // no blocking. |OnSendCB| is used to trigger a |Receive| on the other socket. |
43 class MockSyncSocket : public base::SyncSocket { | 47 class MockSyncSocket : public base::SyncSocket { |
44 public: | 48 public: |
45 // This allows for 2 requests in queue between the |MockSyncSocket|s. | 49 // This allows for 2 requests in queue between the |MockSyncSocket|s. |
46 static const int kSharedBufferSize = 8; | 50 static const int kSharedBufferSize = 8; |
47 | 51 |
48 // Buffer to be shared between two |MockSyncSocket|s. Allocated on heap. | 52 // Buffer to be shared between two |MockSyncSocket|s. Allocated on heap. |
49 struct SharedBuffer { | 53 struct SharedBuffer { |
50 SharedBuffer() : data(), start(0), length(0) {} | 54 SharedBuffer() : data(), start(0), length(0) {} |
51 | 55 |
52 uint8 data[kSharedBufferSize]; | 56 uint8_t data[kSharedBufferSize]; |
53 size_t start; | 57 size_t start; |
54 size_t length; | 58 size_t length; |
55 }; | 59 }; |
56 | 60 |
57 // Callback used for pairing an A.Send() with B.Receive() without blocking. | 61 // Callback used for pairing an A.Send() with B.Receive() without blocking. |
58 typedef base::Callback<void()> OnSendCB; | 62 typedef base::Callback<void()> OnSendCB; |
59 | 63 |
60 explicit MockSyncSocket(SharedBuffer* shared_buffer) | 64 explicit MockSyncSocket(SharedBuffer* shared_buffer) |
61 : buffer_(shared_buffer), | 65 : buffer_(shared_buffer), |
62 in_failure_mode_(false) {} | 66 in_failure_mode_(false) {} |
(...skipping 17 matching lines...) |
80 bool in_failure_mode_; | 84 bool in_failure_mode_; |
81 | 85 |
82 DISALLOW_COPY_AND_ASSIGN(MockSyncSocket); | 86 DISALLOW_COPY_AND_ASSIGN(MockSyncSocket); |
83 }; | 87 }; |
84 | 88 |
85 // base::SyncSocket implementation | 89 // base::SyncSocket implementation |
86 size_t MockSyncSocket::Send(const void* buffer, size_t length) { | 90 size_t MockSyncSocket::Send(const void* buffer, size_t length) { |
87 if (in_failure_mode_) | 91 if (in_failure_mode_) |
88 return 0; | 92 return 0; |
89 | 93 |
90 const uint8* b = static_cast<const uint8*>(buffer); | 94 const uint8_t* b = static_cast<const uint8_t*>(buffer); |
91 for (size_t i = 0; i < length; ++i, ++buffer_->length) | 95 for (size_t i = 0; i < length; ++i, ++buffer_->length) |
92 buffer_->data[buffer_->start + buffer_->length] = b[i]; | 96 buffer_->data[buffer_->start + buffer_->length] = b[i]; |
93 | 97 |
94 on_send_cb_.Run(); | 98 on_send_cb_.Run(); |
95 return length; | 99 return length; |
96 } | 100 } |
97 | 101 |
98 size_t MockSyncSocket::Receive(void* buffer, size_t length) { | 102 size_t MockSyncSocket::Receive(void* buffer, size_t length) { |
99 uint8* b = static_cast<uint8*>(buffer); | 103 uint8_t* b = static_cast<uint8_t*>(buffer); |
100 for (size_t i = buffer_->start; i < buffer_->length; ++i, ++buffer_->start) | 104 for (size_t i = buffer_->start; i < buffer_->length; ++i, ++buffer_->start) |
101 b[i] = buffer_->data[buffer_->start]; | 105 b[i] = buffer_->data[buffer_->start]; |
102 | 106 |
103 // Since buffer is used sequentially, we can reset the buffer indices here. | 107 // Since buffer is used sequentially, we can reset the buffer indices here. |
104 buffer_->start = buffer_->length = 0; | 108 buffer_->start = buffer_->length = 0; |
105 return length; | 109 return length; |
106 } | 110 } |
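
The two mocks are meant to be used as a pair wrapping a single |SharedBuffer|: the |OnSendCB| of one socket is bound so that its Send() synchronously drives a Receive() on its peer, which is what keeps the test single-threaded and non-blocking. A minimal pairing sketch follows; the fixture's actual wiring lives in the skipped lines above, and the two-argument constructor and helper shown here are assumptions for illustration only.

// Hypothetical helper: drains one buffer index from |socket|.
void ReceiveIndexOnSocket(MockSyncSocket* socket) {
  uint32_t buffer_index = 0;
  socket->Receive(&buffer_index, sizeof(buffer_index));
}

// Both sockets share the same buffer; a Send() on |foreign_socket| immediately
// performs the matching Receive() on |local_socket| via the callback.
// Assumes a constructor overload that accepts the |OnSendCB|.
MockSyncSocket::SharedBuffer shared_buffer;
MockSyncSocket local_socket(&shared_buffer);
MockSyncSocket foreign_socket(
    &shared_buffer,
    base::Bind(&ReceiveIndexOnSocket, base::Unretained(&local_socket)));
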
107 | 111 |
108 // This fake class is the consumer used to verify behaviour of the producer. | 112 // This fake class is the consumer used to verify behaviour of the producer. |
109 // The |Initialize()| method shows what the consumer should be responsible for | 113 // The |Initialize()| method shows what the consumer should be responsible for |
110 // in the production code (minus the mocks). | 114 // in the production code (minus the mocks). |
111 class FakeSpeechRecognizer { | 115 class FakeSpeechRecognizer { |
112 public: | 116 public: |
113 FakeSpeechRecognizer() : is_responsive_(true) {} | 117 FakeSpeechRecognizer() : is_responsive_(true) {} |
114 | 118 |
115 void Initialize( | 119 void Initialize( |
116 const blink::WebMediaStreamTrack& track, | 120 const blink::WebMediaStreamTrack& track, |
117 const media::AudioParameters& sink_params, | 121 const media::AudioParameters& sink_params, |
118 base::SharedMemoryHandle* foreign_memory_handle) { | 122 base::SharedMemoryHandle* foreign_memory_handle) { |
119 // Shared memory is allocated, mapped and shared. | 123 // Shared memory is allocated, mapped and shared. |
120 const uint32 kSharedMemorySize = | 124 const uint32_t kSharedMemorySize = |
121 sizeof(media::AudioInputBufferParameters) + | 125 sizeof(media::AudioInputBufferParameters) + |
122 media::AudioBus::CalculateMemorySize(sink_params); | 126 media::AudioBus::CalculateMemorySize(sink_params); |
123 shared_memory_.reset(new base::SharedMemory()); | 127 shared_memory_.reset(new base::SharedMemory()); |
124 ASSERT_TRUE(shared_memory_->CreateAndMapAnonymous(kSharedMemorySize)); | 128 ASSERT_TRUE(shared_memory_->CreateAndMapAnonymous(kSharedMemorySize)); |
125 memset(shared_memory_->memory(), 0, kSharedMemorySize); | 129 memset(shared_memory_->memory(), 0, kSharedMemorySize); |
126 ASSERT_TRUE(shared_memory_->ShareToProcess(base::GetCurrentProcessHandle(), | 130 ASSERT_TRUE(shared_memory_->ShareToProcess(base::GetCurrentProcessHandle(), |
127 foreign_memory_handle)); | 131 foreign_memory_handle)); |
128 | 132 |
129 // Wrap the shared memory for the audio bus. | 133 // Wrap the shared memory for the audio bus. |
130 media::AudioInputBuffer* buffer = | 134 media::AudioInputBuffer* buffer = |
(...skipping 73 matching lines...) |
204 SpeechRecognitionAudioSinkTest() {} | 208 SpeechRecognitionAudioSinkTest() {} |
205 | 209 |
206 ~SpeechRecognitionAudioSinkTest() { | 210 ~SpeechRecognitionAudioSinkTest() { |
207 speech_audio_sink_.reset(); | 211 speech_audio_sink_.reset(); |
208 blink::WebHeap::collectAllGarbageForTesting(); | 212 blink::WebHeap::collectAllGarbageForTesting(); |
209 } | 213 } |
210 | 214 |
211 // Initializes the producer and consumer with specified audio parameters. | 215 // Initializes the producer and consumer with specified audio parameters. |
212 // Returns the minimal number of input audio buffers which need to be captured | 216 // Returns the minimal number of input audio buffers which need to be captured |
213 // before they get sent to the consumer. | 217 // before they get sent to the consumer. |
214 uint32 Initialize(int input_sample_rate, | 218 uint32_t Initialize(int input_sample_rate, |
215 int input_frames_per_buffer, | 219 int input_frames_per_buffer, |
216 int output_sample_rate, | 220 int output_sample_rate, |
217 int output_frames_per_buffer) { | 221 int output_frames_per_buffer) { |
218 // Audio Environment setup. | 222 // Audio Environment setup. |
219 source_params_.Reset(kInputFormat, | 223 source_params_.Reset(kInputFormat, |
220 kInputChannelLayout, | 224 kInputChannelLayout, |
221 input_sample_rate, | 225 input_sample_rate, |
222 kInputBitsPerSample, | 226 kInputBitsPerSample, |
223 input_frames_per_buffer); | 227 input_frames_per_buffer); |
224 sink_params_.Reset(kOutputFormat, | 228 sink_params_.Reset(kOutputFormat, |
225 kOutputChannelLayout, | 229 kOutputChannelLayout, |
226 output_sample_rate, | 230 output_sample_rate, |
227 kOutputBitsPerSample, | 231 kOutputBitsPerSample, |
(...skipping 20 matching lines...) |
248 | 252 |
249 // Create the producer. | 253 // Create the producer. |
250 scoped_ptr<base::SyncSocket> sending_socket(recognizer_->sending_socket()); | 254 scoped_ptr<base::SyncSocket> sending_socket(recognizer_->sending_socket()); |
251 speech_audio_sink_.reset(new SpeechRecognitionAudioSink( | 255 speech_audio_sink_.reset(new SpeechRecognitionAudioSink( |
252 blink_track, sink_params_, foreign_memory_handle, | 256 blink_track, sink_params_, foreign_memory_handle, |
253 sending_socket.Pass(), | 257 sending_socket.Pass(), |
254 base::Bind(&SpeechRecognitionAudioSinkTest::StoppedCallback, | 258 base::Bind(&SpeechRecognitionAudioSinkTest::StoppedCallback, |
255 base::Unretained(this)))); | 259 base::Unretained(this)))); |
256 | 260 |
257 // Return number of buffers needed to trigger resampling and consumption. | 261 // Return number of buffers needed to trigger resampling and consumption. |
258 return static_cast<uint32>(std::ceil( | 262 return static_cast<uint32_t>(std::ceil( |
259 static_cast<double>(output_frames_per_buffer * input_sample_rate) / | 263 static_cast<double>(output_frames_per_buffer * input_sample_rate) / |
260 (input_frames_per_buffer * output_sample_rate))); | 264 (input_frames_per_buffer * output_sample_rate))); |
261 } | 265 } |
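
For the audio parameters used by most of the tests below (44.1 kHz input in 441-frame / 10 ms chunks, 16 kHz output in 1600-frame / 100 ms chunks), this formula works out to ten captured input buffers per consumer notification. A small illustrative check, not part of the original tests:

// ceil((1600 * 44100) / (441 * 16000)) = ceil(70560000 / 7056000) = 10,
// i.e. ten 10 ms input buffers fill one 100 ms output buffer.
const uint32_t buffers_per_notification = Initialize(44100, 441, 16000, 1600);
EXPECT_EQ(10U, buffers_per_notification);
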
262 | 266 |
263 // Mock callback expected to be called when the track is stopped. | 267 // Mock callback expected to be called when the track is stopped. |
264 MOCK_METHOD0(StoppedCallback, void()); | 268 MOCK_METHOD0(StoppedCallback, void()); |
265 | 269 |
266 protected: | 270 protected: |
267 // Prepares a blink track of a given MediaStreamType and attaches the native | 271 // Prepares a blink track of a given MediaStreamType and attaches the native |
268 // track which can be used to capture audio data and pass it to the producer. | 272 // track which can be used to capture audio data and pass it to the producer. |
(...skipping 19 matching lines...) |
288 false /* remote */, true /* readonly */); | 292 false /* remote */, true /* readonly */); |
289 MediaStreamSource::SourceStoppedCallback cb; | 293 MediaStreamSource::SourceStoppedCallback cb; |
290 blink_audio_source.setExtraData( | 294 blink_audio_source.setExtraData( |
291 new MediaStreamAudioSource(-1, device_info, cb, NULL)); | 295 new MediaStreamAudioSource(-1, device_info, cb, NULL)); |
292 blink_track->initialize(blink::WebString::fromUTF8("dummy_track"), | 296 blink_track->initialize(blink::WebString::fromUTF8("dummy_track"), |
293 blink_audio_source); | 297 blink_audio_source); |
294 blink_track->setExtraData(native_track.release()); | 298 blink_track->setExtraData(native_track.release()); |
295 } | 299 } |
296 | 300 |
297 // Emulates an audio capture device capturing data from the source. | 301 // Emulates an audio capture device capturing data from the source. |
298 inline void CaptureAudio(const uint32 buffers) { | 302 inline void CaptureAudio(const uint32_t buffers) { |
299 for (uint32 i = 0; i < buffers; ++i) { | 303 for (uint32_t i = 0; i < buffers; ++i) { |
300 const base::TimeTicks estimated_capture_time = first_frame_capture_time_ + | 304 const base::TimeTicks estimated_capture_time = first_frame_capture_time_ + |
301 (sample_frames_captured_ * base::TimeDelta::FromSeconds(1) / | 305 (sample_frames_captured_ * base::TimeDelta::FromSeconds(1) / |
302 source_params_.sample_rate()); | 306 source_params_.sample_rate()); |
303 native_track()->Capture(*source_bus_, estimated_capture_time, false); | 307 native_track()->Capture(*source_bus_, estimated_capture_time, false); |
304 sample_frames_captured_ += source_bus_->frames(); | 308 sample_frames_captured_ += source_bus_->frames(); |
305 } | 309 } |
306 } | 310 } |
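
Each simulated capture advances the estimated capture time by one source buffer's duration; with the 44.1 kHz / 441-frame source parameters, the i-th buffer is stamped i * 10 ms after |first_frame_capture_time_|. A hypothetical spot check of that bookkeeping, mirroring the arithmetic above:

// After two captures of 441 frames at 44100 Hz, 882 / 44100 s = 20 ms have
// elapsed, so the third buffer would be stamped at +20 ms.
const base::TimeDelta elapsed =
    882 * base::TimeDelta::FromSeconds(1) / 44100;
EXPECT_EQ(base::TimeDelta::FromMilliseconds(20), elapsed);
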
307 | 311 |
308 // Used to simulate a problem with sockets. | 312 // Used to simulate a problem with sockets. |
309 void SetFailureModeOnForeignSocket(bool in_failure_mode) { | 313 void SetFailureModeOnForeignSocket(bool in_failure_mode) { |
310 recognizer()->sending_socket()->SetFailureMode(in_failure_mode); | 314 recognizer()->sending_socket()->SetFailureMode(in_failure_mode); |
311 } | 315 } |
312 | 316 |
313 // Helper method for verifying captured audio data has been consumed. | 317 // Helper method for verifying captured audio data has been consumed. |
314 inline void AssertConsumedBuffers(const uint32 buffer_index) { | 318 inline void AssertConsumedBuffers(const uint32_t buffer_index) { |
315 ASSERT_EQ(buffer_index, recognizer()->GetAudioInputBuffer()->params.size); | 319 ASSERT_EQ(buffer_index, recognizer()->GetAudioInputBuffer()->params.size); |
316 } | 320 } |
317 | 321 |
318 // Helper method for providing audio data to producer and verifying it was | 322 // Helper method for providing audio data to producer and verifying it was |
319 // consumed on the recognizer. | 323 // consumed on the recognizer. |
320 inline void CaptureAudioAndAssertConsumedBuffers(const uint32 buffers, | 324 inline void CaptureAudioAndAssertConsumedBuffers( |
321 const uint32 buffer_index) { | 325 const uint32_t buffers, |
| 326 const uint32_t buffer_index) { |
322 CaptureAudio(buffers); | 327 CaptureAudio(buffers); |
323 AssertConsumedBuffers(buffer_index); | 328 AssertConsumedBuffers(buffer_index); |
324 } | 329 } |
325 | 330 |
326 // Helper method to capture and assert consumption at different sample rates | 331 // Helper method to capture and assert consumption at different sample rates |
327 // and audio buffer sizes. | 332 // and audio buffer sizes. |
328 inline void AssertConsumptionForAudioParameters( | 333 inline void AssertConsumptionForAudioParameters( |
329 const int input_sample_rate, | 334 const int input_sample_rate, |
330 const int input_frames_per_buffer, | 335 const int input_frames_per_buffer, |
331 const int output_sample_rate, | 336 const int output_sample_rate, |
332 const int output_frames_per_buffer, | 337 const int output_frames_per_buffer, |
333 const uint32 consumptions) { | 338 const uint32_t consumptions) { |
334 const uint32 buffers_per_notification = | 339 const uint32_t buffers_per_notification = |
335 Initialize(input_sample_rate, | 340 Initialize(input_sample_rate, input_frames_per_buffer, |
336 input_frames_per_buffer, | 341 output_sample_rate, output_frames_per_buffer); |
337 output_sample_rate, | |
338 output_frames_per_buffer); | |
339 AssertConsumedBuffers(0U); | 342 AssertConsumedBuffers(0U); |
340 | 343 |
341 for (uint32 i = 1U; i <= consumptions; ++i) { | 344 for (uint32_t i = 1U; i <= consumptions; ++i) { |
342 CaptureAudio(buffers_per_notification); | 345 CaptureAudio(buffers_per_notification); |
343 ASSERT_EQ(i, recognizer()->GetAudioInputBuffer()->params.size) | 346 ASSERT_EQ(i, recognizer()->GetAudioInputBuffer()->params.size) |
344 << "Tested at rates: " | 347 << "Tested at rates: " |
345 << "In(" << input_sample_rate << ", " << input_frames_per_buffer | 348 << "In(" << input_sample_rate << ", " << input_frames_per_buffer |
346 << ") " | 349 << ") " |
347 << "Out(" << output_sample_rate << ", " << output_frames_per_buffer | 350 << "Out(" << output_sample_rate << ", " << output_frames_per_buffer |
348 << ")"; | 351 << ")"; |
349 } | 352 } |
350 } | 353 } |
351 | 354 |
(...skipping 12 matching lines...) |
364 // Consumer. | 367 // Consumer. |
365 scoped_ptr<FakeSpeechRecognizer> recognizer_; | 368 scoped_ptr<FakeSpeechRecognizer> recognizer_; |
366 | 369 |
367 // Audio related members. | 370 // Audio related members. |
368 scoped_ptr<media::AudioBus> source_bus_; | 371 scoped_ptr<media::AudioBus> source_bus_; |
369 media::AudioParameters source_params_; | 372 media::AudioParameters source_params_; |
370 media::AudioParameters sink_params_; | 373 media::AudioParameters sink_params_; |
371 WebRtcLocalAudioTrack* native_track_; | 374 WebRtcLocalAudioTrack* native_track_; |
372 | 375 |
373 base::TimeTicks first_frame_capture_time_; | 376 base::TimeTicks first_frame_capture_time_; |
374 int64 sample_frames_captured_; | 377 int64_t sample_frames_captured_; |
375 | 378 |
376 DISALLOW_COPY_AND_ASSIGN(SpeechRecognitionAudioSinkTest); | 379 DISALLOW_COPY_AND_ASSIGN(SpeechRecognitionAudioSinkTest); |
377 }; | 380 }; |
378 | 381 |
379 // Not all types of tracks are supported. This test checks if that policy is | 382 // Not all types of tracks are supported. This test checks if that policy is |
380 // implemented correctly. | 383 // implemented correctly. |
381 TEST_F(SpeechRecognitionAudioSinkTest, CheckIsSupportedAudioTrack) { | 384 TEST_F(SpeechRecognitionAudioSinkTest, CheckIsSupportedAudioTrack) { |
382 typedef std::map<MediaStreamType, bool> SupportedTrackPolicy; | 385 typedef std::map<MediaStreamType, bool> SupportedTrackPolicy; |
383 | 386 |
384 // This test must be aligned with the policy of supported tracks. | 387 // This test must be aligned with the policy of supported tracks. |
(...skipping 41 matching lines...) |
426 } | 429 } |
427 | 430 |
428 // Checks that the input data is getting resampled to the target sample rate. | 431 // Checks that the input data is getting resampled to the target sample rate. |
429 TEST_F(SpeechRecognitionAudioSinkTest, AudioDataIsResampledOnSink) { | 432 TEST_F(SpeechRecognitionAudioSinkTest, AudioDataIsResampledOnSink) { |
430 EXPECT_GE(kInputChannels, 1); | 433 EXPECT_GE(kInputChannels, 1); |
431 EXPECT_GE(kOutputChannels, 1); | 434 EXPECT_GE(kOutputChannels, 1); |
432 | 435 |
433 // Input audio is sampled at 44.1 kHz with data chunks of 10 ms. Desired output | 436 // Input audio is sampled at 44.1 kHz with data chunks of 10 ms. Desired output |
434 // corresponds to the speech recognition engine requirements: 16 kHz with | 437 // corresponds to the speech recognition engine requirements: 16 kHz with |
435 // 100 ms chunks (1600 frames per buffer). | 438 // 100 ms chunks (1600 frames per buffer). |
436 const uint32 kSourceFrames = 441; | 439 const uint32_t kSourceFrames = 441; |
437 const uint32 buffers_per_notification = | 440 const uint32_t buffers_per_notification = |
438 Initialize(44100, kSourceFrames, 16000, 1600); | 441 Initialize(44100, kSourceFrames, 16000, 1600); |
439 // Fill audio input frames with 0, 1, 2, 3, ..., 440. | 442 // Fill audio input frames with 0, 1, 2, 3, ..., 440. |
440 int16 source_data[kSourceFrames * kInputChannels]; | 443 int16_t source_data[kSourceFrames * kInputChannels]; |
441 for (uint32 i = 0; i < kSourceFrames; ++i) { | 444 for (uint32_t i = 0; i < kSourceFrames; ++i) { |
442 for (int c = 0; c < kInputChannels; ++c) | 445 for (int c = 0; c < kInputChannels; ++c) |
443 source_data[i * kInputChannels + c] = i; | 446 source_data[i * kInputChannels + c] = i; |
444 } | 447 } |
445 source_bus()->FromInterleaved( | 448 source_bus()->FromInterleaved( |
446 source_data, kSourceFrames, sizeof(source_data[0])); | 449 source_data, kSourceFrames, sizeof(source_data[0])); |
447 | 450 |
448 // Prepare sink audio bus and data for rendering. | 451 // Prepare sink audio bus and data for rendering. |
449 media::AudioBus* sink_bus = recognizer()->audio_bus(); | 452 media::AudioBus* sink_bus = recognizer()->audio_bus(); |
450 const uint32 kSinkDataLength = 1600 * kOutputChannels; | 453 const uint32_t kSinkDataLength = 1600 * kOutputChannels; |
451 int16 sink_data[kSinkDataLength] = {0}; | 454 int16_t sink_data[kSinkDataLength] = {0}; |
452 | 455 |
453 // Render the audio data from the recognizer. | 456 // Render the audio data from the recognizer. |
454 sink_bus->ToInterleaved(sink_bus->frames(), | 457 sink_bus->ToInterleaved(sink_bus->frames(), |
455 sink_params().bits_per_sample() / 8, sink_data); | 458 sink_params().bits_per_sample() / 8, sink_data); |
456 | 459 |
457 // Checking only a fraction of the sink frames. | 460 // Checking only a fraction of the sink frames. |
458 const uint32 kNumFramesToTest = 12; | 461 const uint32_t kNumFramesToTest = 12; |
459 | 462 |
460 // Check all channels are zeroed out before we trigger resampling. | 463 // Check all channels are zeroed out before we trigger resampling. |
461 for (uint32 i = 0; i < kNumFramesToTest; ++i) { | 464 for (uint32_t i = 0; i < kNumFramesToTest; ++i) { |
462 for (int c = 0; c < kOutputChannels; ++c) | 465 for (int c = 0; c < kOutputChannels; ++c) |
463 EXPECT_EQ(0, sink_data[i * kOutputChannels + c]); | 466 EXPECT_EQ(0, sink_data[i * kOutputChannels + c]); |
464 } | 467 } |
465 | 468 |
466 // Trigger the speech sink to resample the input data. | 469 // Trigger the speech sink to resample the input data. |
467 AssertConsumedBuffers(0U); | 470 AssertConsumedBuffers(0U); |
468 CaptureAudioAndAssertConsumedBuffers(buffers_per_notification, 1U); | 471 CaptureAudioAndAssertConsumedBuffers(buffers_per_notification, 1U); |
469 | 472 |
470 // Render the audio data from the recognizer. | 473 // Render the audio data from the recognizer. |
471 sink_bus->ToInterleaved(sink_bus->frames(), | 474 sink_bus->ToInterleaved(sink_bus->frames(), |
472 sink_params().bits_per_sample() / 8, sink_data); | 475 sink_params().bits_per_sample() / 8, sink_data); |
473 | 476 |
474 // Resampled data expected frames. Extracted based on |source_data|. | 477 // Resampled data expected frames. Extracted based on |source_data|. |
475 const int16 kExpectedData[kNumFramesToTest] = {0, 2, 5, 8, 11, 13, | 478 const int16_t kExpectedData[kNumFramesToTest] = {0, 2, 5, 8, 11, 13, |
476 16, 19, 22, 24, 27, 30}; | 479 16, 19, 22, 24, 27, 30}; |
477 | 480 |
478 // Check all channels have the same resampled data. | 481 // Check all channels have the same resampled data. |
479 for (uint32 i = 0; i < kNumFramesToTest; ++i) { | 482 for (uint32_t i = 0; i < kNumFramesToTest; ++i) { |
480 for (int c = 0; c < kOutputChannels; ++c) | 483 for (int c = 0; c < kOutputChannels; ++c) |
481 EXPECT_EQ(kExpectedData[i], sink_data[i * kOutputChannels + c]); | 484 EXPECT_EQ(kExpectedData[i], sink_data[i * kOutputChannels + c]); |
482 } | 485 } |
483 } | 486 } |
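
The expected values follow from the ramp input: output frame i of the 16 kHz stream corresponds to source position i * 44100 / 16000 ≈ 2.756 * i, and since each source frame's value equals its index, the resampled samples land just below that product (0, 2.76, 5.51, 8.27, ... → 0, 2, 5, 8, ...), with the small offsets coming from the resampler's filtering. A rough illustrative check of that relationship, reusing the test's locals and an arbitrarily chosen tolerance:

// Not part of the original test: shows why kExpectedData clusters around
// i * (44100.0 / 16000.0) for the 0, 1, 2, ... ramp input.
for (uint32_t i = 0; i < kNumFramesToTest; ++i) {
  const double ideal_source_position = i * 44100.0 / 16000.0;
  EXPECT_NEAR(ideal_source_position, kExpectedData[i], 1.0);
}
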
484 | 487 |
485 // Checks that the producer does not misbehave when a socket failure occurs. | 488 // Checks that the producer does not misbehave when a socket failure occurs. |
486 TEST_F(SpeechRecognitionAudioSinkTest, SyncSocketFailsSendingData) { | 489 TEST_F(SpeechRecognitionAudioSinkTest, SyncSocketFailsSendingData) { |
487 const uint32 buffers_per_notification = Initialize(44100, 441, 16000, 1600); | 490 const uint32_t buffers_per_notification = Initialize(44100, 441, 16000, 1600); |
488 // Start with no problems on the socket. | 491 // Start with no problems on the socket. |
489 AssertConsumedBuffers(0U); | 492 AssertConsumedBuffers(0U); |
490 CaptureAudioAndAssertConsumedBuffers(buffers_per_notification, 1U); | 493 CaptureAudioAndAssertConsumedBuffers(buffers_per_notification, 1U); |
491 | 494 |
492 // A failure occurs (socket cannot send). | 495 // A failure occurs (socket cannot send). |
493 SetFailureModeOnForeignSocket(true); | 496 SetFailureModeOnForeignSocket(true); |
494 CaptureAudioAndAssertConsumedBuffers(buffers_per_notification, 1U); | 497 CaptureAudioAndAssertConsumedBuffers(buffers_per_notification, 1U); |
495 } | 498 } |
496 | 499 |
497 // A very unlikely scenario in which the peer is not synchronizing for a long | 500 // A very unlikely scenario in which the peer is not synchronizing for a long |
498 // time (e.g. 300 ms), which results in dropping cached buffers and restarting. | 501 // time (e.g. 300 ms), which results in dropping cached buffers and restarting. |
499 // We check that the FIFO does not overflow and that the producer is able | 502 // We check that the FIFO does not overflow and that the producer is able |
500 // to resume. | 503 // to resume. |
501 TEST_F(SpeechRecognitionAudioSinkTest, RepeatedSycnhronizationLag) { | 504 TEST_F(SpeechRecognitionAudioSinkTest, RepeatedSycnhronizationLag) { |
502 const uint32 buffers_per_notification = Initialize(44100, 441, 16000, 1600); | 505 const uint32_t buffers_per_notification = Initialize(44100, 441, 16000, 1600); |
503 | 506 |
504 // Start with no synchronization problems. | 507 // Start with no synchronization problems. |
505 AssertConsumedBuffers(0U); | 508 AssertConsumedBuffers(0U); |
506 CaptureAudioAndAssertConsumedBuffers(buffers_per_notification, 1U); | 509 CaptureAudioAndAssertConsumedBuffers(buffers_per_notification, 1U); |
507 | 510 |
508 // Consumer gets out of sync. | 511 // Consumer gets out of sync. |
509 recognizer()->SimulateResponsiveness(false); | 512 recognizer()->SimulateResponsiveness(false); |
510 CaptureAudioAndAssertConsumedBuffers(buffers_per_notification, 1U); | 513 CaptureAudioAndAssertConsumedBuffers(buffers_per_notification, 1U); |
511 CaptureAudioAndAssertConsumedBuffers(buffers_per_notification, 1U); | 514 CaptureAudioAndAssertConsumedBuffers(buffers_per_notification, 1U); |
512 CaptureAudioAndAssertConsumedBuffers(buffers_per_notification, 1U); | 515 CaptureAudioAndAssertConsumedBuffers(buffers_per_notification, 1U); |
513 | 516 |
514 // Consumer recovers. | 517 // Consumer recovers. |
515 recognizer()->SimulateResponsiveness(true); | 518 recognizer()->SimulateResponsiveness(true); |
516 CaptureAudioAndAssertConsumedBuffers(buffers_per_notification, 2U); | 519 CaptureAudioAndAssertConsumedBuffers(buffers_per_notification, 2U); |
517 CaptureAudioAndAssertConsumedBuffers(buffers_per_notification, 3U); | 520 CaptureAudioAndAssertConsumedBuffers(buffers_per_notification, 3U); |
518 CaptureAudioAndAssertConsumedBuffers(buffers_per_notification, 4U); | 521 CaptureAudioAndAssertConsumedBuffers(buffers_per_notification, 4U); |
519 } | 522 } |
520 | 523 |
521 // Checks that an OnStoppedCallback is issued when the track is stopped. | 524 // Checks that an OnStoppedCallback is issued when the track is stopped. |
522 TEST_F(SpeechRecognitionAudioSinkTest, OnReadyStateChangedOccured) { | 525 TEST_F(SpeechRecognitionAudioSinkTest, OnReadyStateChangedOccured) { |
523 const uint32 buffers_per_notification = Initialize(44100, 441, 16000, 1600); | 526 const uint32_t buffers_per_notification = Initialize(44100, 441, 16000, 1600); |
524 AssertConsumedBuffers(0U); | 527 AssertConsumedBuffers(0U); |
525 CaptureAudioAndAssertConsumedBuffers(buffers_per_notification, 1U); | 528 CaptureAudioAndAssertConsumedBuffers(buffers_per_notification, 1U); |
526 EXPECT_CALL(*this, StoppedCallback()).Times(1); | 529 EXPECT_CALL(*this, StoppedCallback()).Times(1); |
527 | 530 |
528 native_track()->Stop(); | 531 native_track()->Stop(); |
529 CaptureAudioAndAssertConsumedBuffers(buffers_per_notification, 1U); | 532 CaptureAudioAndAssertConsumedBuffers(buffers_per_notification, 1U); |
530 } | 533 } |
531 | 534 |
532 } // namespace content | 535 } // namespace content |