Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(97)

Side by Side Diff: content/renderer/media/speech_recognition_audio_sink_unittest.cc

Issue 1647773002: MediaStream audio sourcing: Bypass audio processing for non-WebRTC cases. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: NOT FOR REVIEW -- This will be broken-up across multiple CLs. Created 4 years, 10 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 // Copyright 2014 The Chromium Authors. All rights reserved. 1 // Copyright 2014 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "content/renderer/media/speech_recognition_audio_sink.h" 5 #include "content/renderer/media/speech_recognition_audio_sink.h"
6 6
7 #include <stddef.h> 7 #include <stddef.h>
8 #include <stdint.h> 8 #include <stdint.h>
9 #include <string.h> 9 #include <string.h>
10 #include <utility> 10 #include <utility>
11 11
12 #include "base/bind.h" 12 #include "base/bind.h"
13 #include "base/macros.h" 13 #include "base/macros.h"
14 #include "base/strings/utf_string_conversions.h" 14 #include "base/strings/utf_string_conversions.h"
15 #include "content/renderer/media/media_stream_audio_source.h" 15 #include "content/renderer/media/media_stream_audio_track.h"
16 #include "content/renderer/media/mock_media_constraint_factory.h" 16 #include "content/renderer/media/mock_media_constraint_factory.h"
17 #include "content/renderer/media/webrtc/webrtc_local_audio_track_adapter.h" 17 #include "content/renderer/media/webrtc/mock_peer_connection_dependency_factory.h"
18 #include "content/renderer/media/webrtc_local_audio_track.h" 18 #include "content/renderer/media/webrtc/processed_local_audio_source.h"
19 #include "media/audio/audio_parameters.h" 19 #include "media/audio/audio_parameters.h"
20 #include "media/base/audio_bus.h" 20 #include "media/base/audio_bus.h"
21 #include "testing/gmock/include/gmock/gmock.h" 21 #include "testing/gmock/include/gmock/gmock.h"
22 #include "testing/gtest/include/gtest/gtest.h" 22 #include "testing/gtest/include/gtest/gtest.h"
23 #include "third_party/WebKit/public/platform/WebMediaStreamTrack.h" 23 #include "third_party/WebKit/public/platform/WebMediaStreamTrack.h"
24 #include "third_party/WebKit/public/web/WebHeap.h" 24 #include "third_party/WebKit/public/web/WebHeap.h"
25 25
26 namespace { 26 namespace {
27 27
28 // Supported speech recognition audio parameters. 28 // Supported speech recognition audio parameters.
(...skipping 207 matching lines...) Expand 10 before | Expand all | Expand 10 after
236 media::AudioBus::Create(kInputChannels, input_frames_per_buffer); 236 media::AudioBus::Create(kInputChannels, input_frames_per_buffer);
237 source_bus_->Zero(); 237 source_bus_->Zero();
238 first_frame_capture_time_ = base::TimeTicks::Now(); 238 first_frame_capture_time_ = base::TimeTicks::Now();
239 sample_frames_captured_ = 0; 239 sample_frames_captured_ = 0;
240 240
241 // Prepare the track and audio source. 241 // Prepare the track and audio source.
242 blink::WebMediaStreamTrack blink_track; 242 blink::WebMediaStreamTrack blink_track;
243 PrepareBlinkTrackOfType(MEDIA_DEVICE_AUDIO_CAPTURE, &blink_track); 243 PrepareBlinkTrackOfType(MEDIA_DEVICE_AUDIO_CAPTURE, &blink_track);
244 244
245 // Get the native track from the blink track and initialize. 245 // Get the native track from the blink track and initialize.
246 native_track_ = 246 native_track_ = MediaStreamAudioTrack::Get(blink_track);
247 static_cast<WebRtcLocalAudioTrack*>(blink_track.extraData()); 247 native_track_->SetFormat(source_params_);
248 native_track_->OnSetFormat(source_params_);
249 248
250 // Create and initialize the consumer. 249 // Create and initialize the consumer.
251 recognizer_.reset(new FakeSpeechRecognizer()); 250 recognizer_.reset(new FakeSpeechRecognizer());
252 base::SharedMemoryHandle foreign_memory_handle; 251 base::SharedMemoryHandle foreign_memory_handle;
253 recognizer_->Initialize(blink_track, sink_params_, &foreign_memory_handle); 252 recognizer_->Initialize(blink_track, sink_params_, &foreign_memory_handle);
254 253
255 // Create the producer. 254 // Create the producer.
256 scoped_ptr<base::SyncSocket> sending_socket(recognizer_->sending_socket()); 255 scoped_ptr<base::SyncSocket> sending_socket(recognizer_->sending_socket());
257 speech_audio_sink_.reset(new SpeechRecognitionAudioSink( 256 speech_audio_sink_.reset(new SpeechRecognitionAudioSink(
258 blink_track, sink_params_, foreign_memory_handle, 257 blink_track, sink_params_, foreign_memory_handle,
259 std::move(sending_socket), 258 std::move(sending_socket),
260 base::Bind(&SpeechRecognitionAudioSinkTest::StoppedCallback, 259 base::Bind(&SpeechRecognitionAudioSinkTest::StoppedCallback,
261 base::Unretained(this)))); 260 base::Unretained(this))));
262 261
263 // Return number of buffers needed to trigger resampling and consumption. 262 // Return number of buffers needed to trigger resampling and consumption.
264 return static_cast<uint32_t>(std::ceil( 263 return static_cast<uint32_t>(std::ceil(
265 static_cast<double>(output_frames_per_buffer * input_sample_rate) / 264 static_cast<double>(output_frames_per_buffer * input_sample_rate) /
266 (input_frames_per_buffer * output_sample_rate))); 265 (input_frames_per_buffer * output_sample_rate)));
267 } 266 }
268 267
269 // Mock callback expected to be called when the track is stopped. 268 // Mock callback expected to be called when the track is stopped.
270 MOCK_METHOD0(StoppedCallback, void()); 269 MOCK_METHOD0(StoppedCallback, void());
271 270
272 protected: 271 protected:
273 // Prepares a blink track of a given MediaStreamType and attaches the native 272 // Prepares a blink track of a given MediaStreamType and attaches the native
274 // track which can be used to capture audio data and pass it to the producer. 273 // track which can be used to capture audio data and pass it to the producer.
275 static void PrepareBlinkTrackOfType( 274 void PrepareBlinkTrackOfType(const MediaStreamType device_type,
276 const MediaStreamType device_type, 275 blink::WebMediaStreamTrack* blink_track) {
277 blink::WebMediaStreamTrack* blink_track) {
278 StreamDeviceInfo device_info(device_type, "Mock device",
279 "mock_device_id");
280 MockMediaConstraintFactory constraint_factory;
281 const blink::WebMediaConstraints constraints =
282 constraint_factory.CreateWebMediaConstraints();
283 scoped_refptr<WebRtcAudioCapturer> capturer(
284 WebRtcAudioCapturer::CreateCapturer(-1, device_info, constraints, NULL,
285 NULL));
286 scoped_refptr<WebRtcLocalAudioTrackAdapter> adapter(
287 WebRtcLocalAudioTrackAdapter::Create(std::string(), NULL));
288 scoped_ptr<WebRtcLocalAudioTrack> native_track(
289 new WebRtcLocalAudioTrack(adapter.get(), capturer, NULL));
290 blink::WebMediaStreamSource blink_audio_source; 276 blink::WebMediaStreamSource blink_audio_source;
291 blink_audio_source.initialize(base::UTF8ToUTF16("dummy_source_id"), 277 blink_audio_source.initialize(base::UTF8ToUTF16("dummy_source_id"),
292 blink::WebMediaStreamSource::TypeAudio, 278 blink::WebMediaStreamSource::TypeAudio,
293 base::UTF8ToUTF16("dummy_source_name"), 279 base::UTF8ToUTF16("dummy_source_name"),
294 false /* remote */, true /* readonly */); 280 false /* remote */, true /* readonly */);
295 MediaStreamSource::SourceStoppedCallback cb; 281 ProcessedLocalAudioSource* const audio_source =
296 blink_audio_source.setExtraData( 282 new ProcessedLocalAudioSource(
297 new MediaStreamAudioSource(-1, device_info, cb, NULL)); 283 -1 /* consumer_render_frame_id is N/A for non-browser tests */,
284 StreamDeviceInfo(device_type, "Mock device", "mock_device_id"),
285 &mock_dependency_factory_);
286 audio_source->SetAllowInvalidRenderFrameIdForTesting(true);
287 audio_source->SetSourceConstraints(
288 MockMediaConstraintFactory().CreateWebMediaConstraints());
289 blink_audio_source.setExtraData(audio_source); // Takes ownership.
290
298 blink_track->initialize(blink::WebString::fromUTF8("dummy_track"), 291 blink_track->initialize(blink::WebString::fromUTF8("dummy_track"),
299 blink_audio_source); 292 blink_audio_source);
300 blink_track->setExtraData(native_track.release()); 293 ASSERT_TRUE(audio_source->ConnectToTrack(*blink_track));
301 } 294 }
302 295
303 // Emulates an audio capture device capturing data from the source. 296 // Emulates an audio capture device capturing data from the source.
304 inline void CaptureAudio(const uint32_t buffers) { 297 inline void CaptureAudio(const uint32_t buffers) {
305 for (uint32_t i = 0; i < buffers; ++i) { 298 for (uint32_t i = 0; i < buffers; ++i) {
306 const base::TimeTicks estimated_capture_time = first_frame_capture_time_ + 299 const base::TimeTicks estimated_capture_time = first_frame_capture_time_ +
307 (sample_frames_captured_ * base::TimeDelta::FromSeconds(1) / 300 (sample_frames_captured_ * base::TimeDelta::FromSeconds(1) /
308 source_params_.sample_rate()); 301 source_params_.sample_rate());
309 native_track()->Capture(*source_bus_, estimated_capture_time, false); 302 native_track()->DeliverDataToSinks(*source_bus_, estimated_capture_time);
310 sample_frames_captured_ += source_bus_->frames(); 303 sample_frames_captured_ += source_bus_->frames();
311 } 304 }
312 } 305 }
313 306
314 // Used to simulate a problem with sockets. 307 // Used to simulate a problem with sockets.
315 void SetFailureModeOnForeignSocket(bool in_failure_mode) { 308 void SetFailureModeOnForeignSocket(bool in_failure_mode) {
316 recognizer()->sending_socket()->SetFailureMode(in_failure_mode); 309 recognizer()->sending_socket()->SetFailureMode(in_failure_mode);
317 } 310 }
318 311
319 // Helper method for verifying captured audio data has been consumed. 312 // Helper method for verifying captured audio data has been consumed.
(...skipping 33 matching lines...) Expand 10 before | Expand all | Expand 10 after
353 << ")"; 346 << ")";
354 } 347 }
355 } 348 }
356 349
357 media::AudioBus* source_bus() const { return source_bus_.get(); } 350 media::AudioBus* source_bus() const { return source_bus_.get(); }
358 351
359 FakeSpeechRecognizer* recognizer() const { return recognizer_.get(); } 352 FakeSpeechRecognizer* recognizer() const { return recognizer_.get(); }
360 353
361 const media::AudioParameters& sink_params() const { return sink_params_; } 354 const media::AudioParameters& sink_params() const { return sink_params_; }
362 355
363 WebRtcLocalAudioTrack* native_track() const { return native_track_; } 356 MediaStreamAudioTrack* native_track() const { return native_track_; }
364 357
365 private: 358 private:
359 MockPeerConnectionDependencyFactory mock_dependency_factory_;
360
366 // Producer. 361 // Producer.
367 scoped_ptr<SpeechRecognitionAudioSink> speech_audio_sink_; 362 scoped_ptr<SpeechRecognitionAudioSink> speech_audio_sink_;
368 363
369 // Consumer. 364 // Consumer.
370 scoped_ptr<FakeSpeechRecognizer> recognizer_; 365 scoped_ptr<FakeSpeechRecognizer> recognizer_;
371 366
372 // Audio related members. 367 // Audio related members.
373 scoped_ptr<media::AudioBus> source_bus_; 368 scoped_ptr<media::AudioBus> source_bus_;
374 media::AudioParameters source_params_; 369 media::AudioParameters source_params_;
375 media::AudioParameters sink_params_; 370 media::AudioParameters sink_params_;
376 WebRtcLocalAudioTrack* native_track_; 371 MediaStreamAudioTrack* native_track_;
377 372
378 base::TimeTicks first_frame_capture_time_; 373 base::TimeTicks first_frame_capture_time_;
379 int64_t sample_frames_captured_; 374 int64_t sample_frames_captured_;
380 375
381 DISALLOW_COPY_AND_ASSIGN(SpeechRecognitionAudioSinkTest); 376 DISALLOW_COPY_AND_ASSIGN(SpeechRecognitionAudioSinkTest);
382 }; 377 };
383 378
384 // Not all types of tracks are supported. This test checks if that policy is 379 // Not all types of tracks are supported. This test checks if that policy is
385 // implemented correctly. 380 // implemented correctly.
386 TEST_F(SpeechRecognitionAudioSinkTest, CheckIsSupportedAudioTrack) { 381 TEST_F(SpeechRecognitionAudioSinkTest, CheckIsSupportedAudioTrack) {
(...skipping 141 matching lines...) Expand 10 before | Expand all | Expand 10 after
528 const uint32_t buffers_per_notification = Initialize(44100, 441, 16000, 1600); 523 const uint32_t buffers_per_notification = Initialize(44100, 441, 16000, 1600);
529 AssertConsumedBuffers(0U); 524 AssertConsumedBuffers(0U);
530 CaptureAudioAndAssertConsumedBuffers(buffers_per_notification, 1U); 525 CaptureAudioAndAssertConsumedBuffers(buffers_per_notification, 1U);
531 EXPECT_CALL(*this, StoppedCallback()).Times(1); 526 EXPECT_CALL(*this, StoppedCallback()).Times(1);
532 527
533 native_track()->Stop(); 528 native_track()->Stop();
534 CaptureAudioAndAssertConsumedBuffers(buffers_per_notification, 1U); 529 CaptureAudioAndAssertConsumedBuffers(buffers_per_notification, 1U);
535 } 530 }
536 531
537 } // namespace content 532 } // namespace content
OLDNEW
« no previous file with comments | « content/renderer/media/rtc_peer_connection_handler_unittest.cc ('k') | content/renderer/media/tagged_list.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698