Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(740)

Side by Side Diff: content/renderer/media/speech_recognition_audio_sink_unittest.cc

Issue 499233003: Binding media stream audio track to speech recognition [renderer] (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Unit test nits + ctor comments Created 6 years, 2 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
(Empty)
1 // Copyright 2014 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "content/renderer/media/speech_recognition_audio_sink.h"
6
7 #include "base/strings/utf_string_conversions.h"
8 #include "content/renderer/media/mock_media_constraint_factory.h"
9 #include "content/renderer/media/webrtc/webrtc_local_audio_track_adapter.h"
10 #include "content/renderer/media/webrtc_local_audio_track.h"
11 #include "media/audio/audio_parameters.h"
12 #include "media/base/audio_bus.h"
13 #include "testing/gmock/include/gmock/gmock.h"
14 #include "testing/gtest/include/gtest/gtest.h"
15 #include "third_party/WebKit/public/platform/WebMediaStreamTrack.h"
16
17 namespace {
18
// Supported speech recognition audio parameters.
// The sink's consumer expects 16 kHz audio in 100 ms chunks
// (16000 * 0.1 = 1600 frames per buffer).
const int kSpeechRecognitionSampleRate = 16000;
const int kSpeechRecognitionFramesPerBuffer = 1600;

// Input audio format: mono 16-bit PCM, emulating a capture device.
const media::AudioParameters::Format kInputFormat =
    media::AudioParameters::AUDIO_PCM_LOW_LATENCY;
const media::ChannelLayout kInputChannelLayout = media::CHANNEL_LAYOUT_MONO;
const int kInputChannels = 1;
const int kInputBitsPerSample = 16;

// Output audio format: stereo 16-bit PCM, i.e. the format delivered through
// the shared-memory audio bus to the fake recognizer.
const media::AudioParameters::Format kOutputFormat =
    media::AudioParameters::AUDIO_PCM_LOW_LATENCY;
const media::ChannelLayout kOutputChannelLayout = media::CHANNEL_LAYOUT_STEREO;
const int kOutputChannels = 2;
const int kOutputBitsPerSample = 16;
36
// Mocked out sockets used for Send/Receive.
// Data is written and read from a shared buffer used as a FIFO and there is
// no blocking. |OnSendCB| is used to trigger a |Receive| on the other socket.
class MockSyncSocket : public base::SyncSocket {
 public:
  // This allows for 2 requests in queue between the |MockSyncSocket|s.
  // (Each request is one uint32 buffer index, i.e. 4 bytes.)
  static const int kSharedBufferSize = 8;

  // Buffer to be shared between two |MockSyncSocket|s. Allocated on heap.
  struct SharedBuffer {
    SharedBuffer() : data(), start(0), length(0) {}

    uint8 data[kSharedBufferSize];  // FIFO storage.
    size_t start;                   // Consumer read position within |data|.
    size_t length;                  // Number of bytes queued so far.
  };

  // Callback used for pairing an A.Send() with B.Receive() without blocking.
  typedef base::Callback<void()> OnSendCB;

  // Receiving end: no send notification callback.
  explicit MockSyncSocket(SharedBuffer* shared_buffer)
      : buffer_(shared_buffer),
        in_failure_mode_(false) {}

  // Sending end: |on_send_cb| is run after each successful Send() so the
  // paired socket can Receive() synchronously.
  MockSyncSocket(SharedBuffer* shared_buffer, const OnSendCB& on_send_cb)
      : buffer_(shared_buffer),
        on_send_cb_(on_send_cb),
        in_failure_mode_(false) {}

  virtual size_t Send(const void* buffer, size_t length) OVERRIDE;
  virtual size_t Receive(void* buffer, size_t length) OVERRIDE;

  // When |in_failure_mode_| == true, the socket fails to send.
  void SetFailureMode(bool in_failure_mode) {
    in_failure_mode_ = in_failure_mode;
  }

 private:
  SharedBuffer* buffer_;       // Not owned; shared with the paired socket.
  const OnSendCB on_send_cb_;  // Null when the single-argument ctor was used.
  bool in_failure_mode_;

  DISALLOW_COPY_AND_ASSIGN(MockSyncSocket);
};
81
82 // base::SyncSocket implementation
83 size_t MockSyncSocket::Send(const void* buffer, size_t length) {
84 if (in_failure_mode_)
85 return 0;
86
87 const uint8* b = static_cast<const uint8*>(buffer);
88 for (size_t i = 0; i < length; ++i, ++buffer_->length)
89 buffer_->data[buffer_->start + buffer_->length] = b[i];
90
91 on_send_cb_.Run();
92 return length;
93 }
94
95 size_t MockSyncSocket::Receive(void* buffer, size_t length) {
96 uint8* b = static_cast<uint8*>(buffer);
97 for (size_t i = buffer_->start; i < buffer_->length; ++i, ++buffer_->start)
98 b[i] = buffer_->data[buffer_->start];
99
100 // Since buffer is used sequentially, we can reset the buffer indices here.
101 buffer_->start = buffer_->length = 0;
102 return length;
103 }
104
// This fake class is the consumer used to verify behaviour of the producer.
// The |Initialize()| method shows what the consumer should be responsible for
// in the production code (minus the mocks).
class FakeSpeechRecognizer {
 public:
  FakeSpeechRecognizer() : is_responsive_(true) { }

  // Allocates the shared memory (audio bus + synchronization counter) and
  // the mock socket pair. |foreign_memory_handle| receives the handle that
  // the producer (the sink under test) maps on its side.
  void Initialize(
      const blink::WebMediaStreamTrack& track,
      const media::AudioParameters& sink_params,
      base::SharedMemoryHandle* foreign_memory_handle) {
    // Shared memory is allocated, mapped and shared.
    uint32 shared_memory_size =
        sizeof(media::AudioInputBufferParameters) +
        media::AudioBus::CalculateMemorySize(sink_params);
    shared_memory_.reset(new base::SharedMemory());
    ASSERT_TRUE(shared_memory_->CreateAndMapAnonymous(shared_memory_size));
    ASSERT_TRUE(shared_memory_->ShareToProcess(base::GetCurrentProcessHandle(),
                                               foreign_memory_handle));

    // Wrap the shared memory for the audio bus.
    media::AudioInputBuffer* buffer =
        static_cast<media::AudioInputBuffer*>(shared_memory_->memory());
    audio_track_bus_ = media::AudioBus::WrapMemory(sink_params, buffer->audio);

    // Reference to the counter used to synchronize.
    buffer_index_ = &(buffer->params.size);
    *buffer_index_ = 0U;

    // Create a shared buffer for the |MockSyncSocket|s.
    shared_buffer_.reset(new MockSyncSocket::SharedBuffer());

    // Local socket will receive signals from the producer.
    local_socket_.reset(new MockSyncSocket(shared_buffer_.get()));

    // We automatically trigger a Receive when data is sent over the socket.
    // NOTE: ownership of |foreign_socket_| is transferred to the test (which
    // wraps it in a scoped_ptr handed to the producer); this class only
    // keeps a raw observing pointer.
    foreign_socket_ = new MockSyncSocket(
        shared_buffer_.get(),
        base::Bind(&FakeSpeechRecognizer::EmulateReceiveThreadLoopIteration,
                   base::Unretained(this)));

    // This is usually done to pair the sockets. Here it's not effective.
    base::SyncSocket::CreatePair(local_socket_.get(), foreign_socket_);
  }

  // Emulates a single iteration of a thread receiving on the socket.
  // This would normally be done on a receiving thread's task on the browser.
  void EmulateReceiveThreadLoopIteration() {
    // When not responsive do nothing as if the process is busy.
    if (!is_responsive_)
      return;

    local_socket_->Receive(buffer_index_, sizeof(*buffer_index_));
    // Notify the producer that the audio buffer has been consumed.
    ++(*buffer_index_);
  }

  // Used to simulate an unresponsive behaviour of the consumer.
  void SimulateResponsiveness(bool is_responsive) {
    is_responsive_ = is_responsive;
  }

  MockSyncSocket* foreign_socket() { return foreign_socket_; }
  media::AudioBus* audio_bus() const { return audio_track_bus_.get(); }
  uint32 buffer_index() { return *buffer_index_; }

 private:
  // True unless SimulateResponsiveness(false) was called.
  bool is_responsive_;

  // Shared memory for the audio and synchronization.
  scoped_ptr<base::SharedMemory> shared_memory_;

  // Fake sockets and their shared buffer.
  scoped_ptr<MockSyncSocket::SharedBuffer> shared_buffer_;
  scoped_ptr<MockSyncSocket> local_socket_;
  MockSyncSocket* foreign_socket_;  // Not owned; see Initialize().

  // Audio bus wrapping the shared memory from the renderer.
  scoped_ptr<media::AudioBus> audio_track_bus_;

  // Used for synchronization of sent/received buffers. Points into the
  // shared memory region.
  uint32* buffer_index_;

  DISALLOW_COPY_AND_ASSIGN(FakeSpeechRecognizer);
};
190
191 } // namespace
192
193 namespace content {
194
// Test fixture wiring a fake capture source (producer side) to a
// |FakeSpeechRecognizer| (consumer side) through the sink under test.
class SpeechRecognitionAudioSinkTest : public testing::Test {
 public:
  SpeechRecognitionAudioSinkTest() {}

  ~SpeechRecognitionAudioSinkTest() {}

  // Initializes the producer and consumer with specified audio parameters.
  // Returns the minimal number of input audio buffers which need to be captured
  // before they get sent to the consumer.
  uint32 Initialize(int input_sample_rate,
                    int input_frames_per_buffer,
                    int output_sample_rate,
                    int output_frames_per_buffer) {
    // Audio Environment setup.
    source_params_.Reset(kInputFormat,
                         kInputChannelLayout,
                         kInputChannels,
                         input_sample_rate,
                         kInputBitsPerSample,
                         input_frames_per_buffer);
    sink_params_.Reset(kOutputFormat,
                       kOutputChannelLayout,
                       kOutputChannels,
                       output_sample_rate,
                       kOutputBitsPerSample,
                       output_frames_per_buffer);
    source_data_.reset(new int16[input_frames_per_buffer * kInputChannels]);

    // Prepare the track and audio source.
    blink::WebMediaStreamTrack blink_track;
    PrepareBlinkTrackOfType(MEDIA_DEVICE_AUDIO_CAPTURE, &blink_track);

    // Get the native track from the blink track and initialize.
    native_track_ =
        static_cast<WebRtcLocalAudioTrack*>(blink_track.extraData());
    native_track_->OnSetFormat(source_params_);

    // Create and initialize the consumer.
    recognizer_.reset(new FakeSpeechRecognizer());
    base::SharedMemoryHandle foreign_memory_handle;
    recognizer_->Initialize(blink_track, sink_params_, &foreign_memory_handle);

    // Create the producer. Ownership of the recognizer's foreign socket is
    // transferred to the sink here.
    scoped_ptr<base::SyncSocket> foreign_socket(recognizer_->foreign_socket());
    speech_audio_sink_.reset(new SpeechRecognitionAudioSink(
        blink_track, sink_params_, foreign_memory_handle,
        foreign_socket.Pass(),
        base::Bind(&SpeechRecognitionAudioSinkTest::StoppedCallback,
                   base::Unretained(this))));

    // Return number of buffers needed to trigger resampling and consumption.
    return static_cast<uint32>(std::ceil(
        static_cast<double>(output_frames_per_buffer * input_sample_rate) /
        (input_frames_per_buffer * output_sample_rate)));
  }

  // Mock callback expected to be called when the track is stopped.
  MOCK_METHOD0(StoppedCallback, void());

 protected:
  // Prepares a blink track of a given MediaStreamType and attaches the native
  // track which can be used to capture audio data and pass it to the producer.
  static void PrepareBlinkTrackOfType(
      const MediaStreamType device_type,
      blink::WebMediaStreamTrack* blink_track) {
    StreamDeviceInfo device_info(device_type, "Mock device",
                                 "mock_device_id");
    MockMediaConstraintFactory constraint_factory;
    const blink::WebMediaConstraints constraints =
        constraint_factory.CreateWebMediaConstraints();
    scoped_refptr<WebRtcAudioCapturer> capturer(
        WebRtcAudioCapturer::CreateCapturer(-1, device_info, constraints, NULL,
                                            NULL));
    scoped_refptr<WebRtcLocalAudioTrackAdapter> adapter(
        WebRtcLocalAudioTrackAdapter::Create(std::string(), NULL));
    scoped_ptr<WebRtcLocalAudioTrack> native_track(
        new WebRtcLocalAudioTrack(adapter.get(), capturer, NULL));
    blink::WebMediaStreamSource blink_audio_source;
    blink_audio_source.initialize(base::UTF8ToUTF16("dummy_source_id"),
                                  blink::WebMediaStreamSource::TypeAudio,
                                  base::UTF8ToUTF16("dummy_source_name"));
    MediaStreamSource::SourceStoppedCallback cb;
    blink_audio_source.setExtraData(
        new MediaStreamAudioSource(-1, device_info, cb, NULL));
    blink_track->initialize(blink::WebString::fromUTF8("dummy_track"),
                            blink_audio_source);
    // |blink_track| takes ownership of the native track.
    blink_track->setExtraData(native_track.release());
  }

  // Emulates an audio capture device capturing data from the source.
  inline void CaptureAudio(const uint32 buffers) {
    for (uint32 i = 0; i < buffers; ++i)
      native_track()->Capture(source_data(),
                              base::TimeDelta::FromMilliseconds(0), 1, false,
                              false);
  }

  // Used to simulate a problem with sockets.
  void SetFailureModeOnForeignSocket(bool in_failure_mode) {
    recognizer()->foreign_socket()->SetFailureMode(in_failure_mode);
  }

  // Helper method for verifying captured audio data has been consumed.
  inline void AssertConsumedBuffers(const uint32 buffer_index) {
    ASSERT_EQ(buffer_index, recognizer_->buffer_index());
  }

  // Helper method for providing audio data to producer and verifying it was
  // consumed on the recognizer.
  inline void CaptureAudioAndAssertConsumedBuffers(const uint32 buffers,
                                                   const uint32 buffer_index) {
    CaptureAudio(buffers);
    AssertConsumedBuffers(buffer_index);
  }

  // Helper method to capture and assert consumption at different sample rates
  // and audio buffer sizes.
  inline void AssertConsumptionForAudioParameters(
      const int input_sample_rate,
      const int input_frames_per_buffer,
      const int output_sample_rate,
      const int output_frames_per_buffer,
      const uint32 consumptions) {
    const uint32 kBuffersPerNotification =
        Initialize(input_sample_rate, input_frames_per_buffer,
                   output_sample_rate, output_frames_per_buffer);
    AssertConsumedBuffers(0U);

    for (uint32 i = 1U; i <= consumptions; ++i) {
      CaptureAudio(kBuffersPerNotification);
      // The failure message includes the parameter tuple so a failing rate
      // combination can be identified at a glance.
      ASSERT_EQ(i, recognizer_->buffer_index())
          << "Tested at rates: "
          << "In(" << input_sample_rate << ", " << input_frames_per_buffer
          << ") "
          << "Out(" << output_sample_rate << ", " << output_frames_per_buffer
          << ")";
    }
  }

  // Accessors used by the test cases below.
  int16* source_data() { return source_data_.get(); }

  FakeSpeechRecognizer* recognizer() { return recognizer_.get(); }

  const media::AudioParameters& sink_params() { return sink_params_; }

  WebRtcLocalAudioTrack* native_track() { return native_track_; }

 private:
  // Producer.
  scoped_ptr<SpeechRecognitionAudioSink> speech_audio_sink_;

  // Consumer.
  scoped_ptr<FakeSpeechRecognizer> recognizer_;

  // Audio related members.
  scoped_ptr<int16[]> source_data_;
  media::AudioParameters source_params_;
  media::AudioParameters sink_params_;
  WebRtcLocalAudioTrack* native_track_;  // Owned by the blink track.

  DISALLOW_COPY_AND_ASSIGN(SpeechRecognitionAudioSinkTest);
};
357
// Not all types of tracks are supported. This test checks if that policy is
// implemented correctly.
TEST_F(SpeechRecognitionAudioSinkTest, CheckIsSupportedAudioTrack) {
  typedef std::map<MediaStreamType, bool> SupportedTrackPolicy;

  // This test must be aligned with the policy of supported tracks.
  SupportedTrackPolicy p;
  p[MEDIA_NO_SERVICE] = false;
  p[MEDIA_DEVICE_AUDIO_CAPTURE] = true;  // The only one supported for now.
  p[MEDIA_DEVICE_VIDEO_CAPTURE] = false;
  p[MEDIA_TAB_AUDIO_CAPTURE] = false;
  p[MEDIA_TAB_VIDEO_CAPTURE] = false;
  p[MEDIA_DESKTOP_VIDEO_CAPTURE] = false;
  p[MEDIA_LOOPBACK_AUDIO_CAPTURE] = false;
  p[MEDIA_DEVICE_AUDIO_OUTPUT] = false;

  // Ensure this test gets updated along with |content::MediaStreamType| enum.
  EXPECT_EQ(NUM_MEDIA_TYPES, p.size());

  // Check the entire policy.
  for (SupportedTrackPolicy::iterator it = p.begin(); it != p.end(); ++it) {
    blink::WebMediaStreamTrack blink_track;
    PrepareBlinkTrackOfType(it->first, &blink_track);
    ASSERT_EQ(
        it->second,
        SpeechRecognitionAudioSink::IsSupportedTrack(blink_track));
  }
}
386
387 // Checks if the producer can support the listed range of input sample rates
388 // and associated buffer sizes.
389 TEST_F(SpeechRecognitionAudioSinkTest, RecognizerNotifiedOnSocket) {
390 const size_t kNumAudioParamTuples = 24;
391 const int kAudioParams[kNumAudioParamTuples][2] = {
392 {8000, 80}, {8000, 800}, {16000, 160}, {16000, 1600},
393 {24000, 240}, {24000, 2400}, {32000, 320}, {32000, 3200},
394 {44100, 441}, {44100, 4410}, {48000, 480}, {48000, 4800},
395 {96000, 960}, {96000, 9600}, {11025, 111}, {11025, 1103},
396 {22050, 221}, {22050, 2205}, {88200, 882}, {88200, 8820},
397 {176400, 1764}, {176400, 17640}, {192000, 1920}, {192000, 19200}};
398
399 // Check all listed tuples of input sample rates and buffers sizes.
400 for (size_t i = 0; i < kNumAudioParamTuples; ++i) {
401 AssertConsumptionForAudioParameters(
402 kAudioParams[i][0], kAudioParams[i][1],
403 kSpeechRecognitionSampleRate, kSpeechRecognitionFramesPerBuffer, 3U);
404 }
405 }
406
407 // Checks that the input data is getting resampled to the target sample rate.
408 TEST_F(SpeechRecognitionAudioSinkTest, AudioDataIsResampledOnSink) {
409 EXPECT_GE(kInputChannels, 1);
410 EXPECT_GE(kOutputChannels, 1);
411
412 // Input audio is sampled at 44.1 KHz with data chunks of 10ms. Desired output
413 // is corresponding to the speech recognition engine requirements: 16 KHz with
414 // 100 ms chunks (1600 frames per buffer).
415 const uint32 kBuffersPerNotification = Initialize(44100, 441, 16000, 1600);
416 // Fill audio input frames with 0, 1, 2, 3, ..., 440.
417 const uint32 kSourceDataLength = 441 * kInputChannels;
418 for (uint32 i = 0; i < kSourceDataLength; ++i) {
419 for (int c = 0; c < kInputChannels; ++c)
420 source_data()[i * kInputChannels + c] = i;
421 }
422
423 // Prepare sink audio bus and data for rendering.
424 media::AudioBus* sink_bus = recognizer()->audio_bus();
425 const uint32 kSinkDataLength = 1600 * kOutputChannels;
426 int16 sink_data[kSinkDataLength] = {0};
427
428 // Render the audio data from the recognizer.
429 sink_bus->ToInterleaved(sink_bus->frames(),
430 sink_params().bits_per_sample() / 8, sink_data);
431
432 // Checking only a fraction of the sink frames.
433 const uint32 kNumFramesToTest = 12;
434
435 // Check all channels are zeroed out before we trigger resampling.
436 for (uint32 i = 0; i < kNumFramesToTest; ++i) {
437 for (int c = 0; c < kOutputChannels; ++c)
438 EXPECT_EQ(0, sink_data[i * kOutputChannels + c]);
439 }
440
441 // Trigger the speech sink to resample the input data.
442 AssertConsumedBuffers(0U);
443 CaptureAudioAndAssertConsumedBuffers(kBuffersPerNotification, 1U);
444
445 // Render the audio data from the recognizer.
446 sink_bus->ToInterleaved(sink_bus->frames(),
447 sink_params().bits_per_sample() / 8, sink_data);
448
449 // Resampled data expected frames. Extracted based on |source_data()|.
450 const int16 kExpectedData[kNumFramesToTest] = {0, 2, 5, 8, 11, 13,
451 16, 19, 22, 24, 27, 30};
452
453 // Check all channels have the same resampled data.
454 for (uint32 i = 0; i < kNumFramesToTest; ++i) {
455 for (int c = 0; c < kOutputChannels; ++c)
456 EXPECT_EQ(kExpectedData[i], sink_data[i * kOutputChannels + c]);
457 }
458 }
459
// Checks that the producer does not misbehave when a socket failure occurs.
TEST_F(SpeechRecognitionAudioSinkTest, SyncSocketFailsSendingData) {
  const uint32 kBuffersPerNotification = Initialize(44100, 441, 16000, 1600);
  // Start with no problems on the socket.
  AssertConsumedBuffers(0U);
  CaptureAudioAndAssertConsumedBuffers(kBuffersPerNotification, 1U);

  // A failure occurs (socket cannot send).
  SetFailureModeOnForeignSocket(true);
  // The consumed-buffer count must remain at 1: the notification never
  // reached the consumer, and the producer must not crash or block.
  CaptureAudioAndAssertConsumedBuffers(kBuffersPerNotification, 1U);
}
471
// Checks that an OnStoppedCallback is issued when the track is stopped.
// (Note: the test name's "Occured" spelling is kept as-is since renaming
// would change the externally visible test identifier.)
TEST_F(SpeechRecognitionAudioSinkTest, OnReadyStateChangedOccured) {
  const uint32 kBuffersPerNotification = Initialize(44100, 441, 16000, 1600);
  AssertConsumedBuffers(0U);
  CaptureAudioAndAssertConsumedBuffers(kBuffersPerNotification, 1U);
  // The stop callback must fire exactly once when the track is stopped.
  EXPECT_CALL(*this, StoppedCallback()).Times(1);

  native_track()->Stop();
  // Audio captured after Stop() must not be delivered; the consumed-buffer
  // count stays at 1.
  CaptureAudioAndAssertConsumedBuffers(kBuffersPerNotification, 1U);
}
482
483 } // namespace content
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698