Index: content/renderer/media/speech_recognition_audio_sink_unittest.cc
diff --git a/content/renderer/media/speech_recognition_audio_sink_unittest.cc b/content/renderer/media/speech_recognition_audio_sink_unittest.cc
index 47639a50febdcd4c9e1658fbce6c55c0115028ca..d337b109b5db6e830f0b29f63837f66a826a9b5c 100644
--- a/content/renderer/media/speech_recognition_audio_sink_unittest.cc
+++ b/content/renderer/media/speech_recognition_audio_sink_unittest.cc
@@ -11,15 +11,15 @@

 #include "base/bind.h"
 #include "base/macros.h"
+#include "base/strings/utf_string_conversions.h"
 #include "content/renderer/media/media_stream_audio_source.h"
-#include "content/renderer/media/media_stream_audio_track.h"
-#include "content/renderer/media/webrtc/mock_peer_connection_dependency_factory.h"
+#include "content/renderer/media/webrtc/webrtc_local_audio_track_adapter.h"
+#include "content/renderer/media/webrtc_local_audio_track.h"
 #include "media/base/audio_bus.h"
 #include "media/base/audio_parameters.h"
 #include "testing/gmock/include/gmock/gmock.h"
 #include "testing/gtest/include/gtest/gtest.h"
 #include "third_party/WebKit/public/platform/WebMediaStreamTrack.h"
-#include "third_party/WebKit/public/platform/WebString.h"
 #include "third_party/WebKit/public/web/WebHeap.h"

 namespace {
@@ -204,27 +204,11 @@

 namespace content {

-namespace {
-
-class TestDrivenAudioSource : public MediaStreamAudioSource {
- public:
-  TestDrivenAudioSource() : MediaStreamAudioSource(true) {}
-  ~TestDrivenAudioSource() final {}
-
-  // Expose protected methods as public for testing.
-  using MediaStreamAudioSource::SetFormat;
-  using MediaStreamAudioSource::DeliverDataToTracks;
-};
-
-}  // namespace
-
 class SpeechRecognitionAudioSinkTest : public testing::Test {
  public:
   SpeechRecognitionAudioSinkTest() {}

   ~SpeechRecognitionAudioSinkTest() {
-    blink_source_.reset();
-    blink_track_.reset();
     speech_audio_sink_.reset();
     blink::WebHeap::collectAllGarbageForTesting();
   }
@@ -254,21 +238,24 @@
     sample_frames_captured_ = 0;

     // Prepare the track and audio source.
-    PrepareBlinkTrackOfType(MEDIA_DEVICE_AUDIO_CAPTURE, &blink_track_);
-    blink_source_ = blink_track_.source();
-    static_cast<TestDrivenAudioSource*>(
-        MediaStreamAudioSource::From(blink_source_))->SetFormat(source_params_);
+    blink::WebMediaStreamTrack blink_track;
+    PrepareBlinkTrackOfType(MEDIA_DEVICE_AUDIO_CAPTURE, &blink_track);
+
+    // Get the native track from the blink track and initialize.
+    native_track_ =
+        static_cast<WebRtcLocalAudioTrack*>(blink_track.getExtraData());
+    native_track_->OnSetFormat(source_params_);

     // Create and initialize the consumer.
     recognizer_.reset(new FakeSpeechRecognizer());
     base::SharedMemoryHandle foreign_memory_handle;
-    recognizer_->Initialize(blink_track_, sink_params_, &foreign_memory_handle);
+    recognizer_->Initialize(blink_track, sink_params_, &foreign_memory_handle);

     // Create the producer.
     std::unique_ptr<base::SyncSocket> sending_socket(
         recognizer_->sending_socket());
     speech_audio_sink_.reset(new SpeechRecognitionAudioSink(
-        blink_track_, sink_params_, foreign_memory_handle,
+        blink_track, sink_params_, foreign_memory_handle,
         std::move(sending_socket),
         base::Bind(&SpeechRecognitionAudioSinkTest::StoppedCallback,
                    base::Unretained(this))));
@@ -285,21 +272,25 @@
  protected:
   // Prepares a blink track of a given MediaStreamType and attaches the native
   // track which can be used to capture audio data and pass it to the producer.
-  void PrepareBlinkTrackOfType(const MediaStreamType device_type,
-                               blink::WebMediaStreamTrack* blink_track) {
-    blink::WebMediaStreamSource blink_source;
-    blink_source.initialize(blink::WebString::fromUTF8("dummy_source_id"),
-                            blink::WebMediaStreamSource::TypeAudio,
-                            blink::WebString::fromUTF8("dummy_source_name"),
-                            false /* remote */);
-    TestDrivenAudioSource* const audio_source = new TestDrivenAudioSource();
-    audio_source->SetDeviceInfo(
-        StreamDeviceInfo(device_type, "Mock device", "mock_device_id"));
-    blink_source.setExtraData(audio_source);  // Takes ownership.
-
+  static void PrepareBlinkTrackOfType(
+      const MediaStreamType device_type,
+      blink::WebMediaStreamTrack* blink_track) {
+    scoped_refptr<WebRtcLocalAudioTrackAdapter> adapter(
+        WebRtcLocalAudioTrackAdapter::Create(std::string(), NULL));
+    std::unique_ptr<WebRtcLocalAudioTrack> native_track(
+        new WebRtcLocalAudioTrack(adapter.get()));
+    blink::WebMediaStreamSource blink_audio_source;
+    blink_audio_source.initialize(base::UTF8ToUTF16("dummy_source_id"),
+                                  blink::WebMediaStreamSource::TypeAudio,
+                                  base::UTF8ToUTF16("dummy_source_name"),
+                                  false /* remote */);
+    MediaStreamSource::SourceStoppedCallback cb;
+    blink_audio_source.setExtraData(new MediaStreamAudioSource(
+        -1, StreamDeviceInfo(device_type, "Mock device", "mock_device_id"), cb,
+        nullptr));
     blink_track->initialize(blink::WebString::fromUTF8("dummy_track"),
-                            blink_source);
-    ASSERT_TRUE(audio_source->ConnectToTrack(*blink_track));
+                            blink_audio_source);
+    blink_track->setExtraData(native_track.release());
   }

   // Emulates an audio capture device capturing data from the source.
@@ -308,9 +299,7 @@
       const base::TimeTicks estimated_capture_time = first_frame_capture_time_ +
           (sample_frames_captured_ * base::TimeDelta::FromSeconds(1) /
               source_params_.sample_rate());
-      static_cast<TestDrivenAudioSource*>(
-          MediaStreamAudioSource::From(blink_source_))
-          ->DeliverDataToTracks(*source_bus_, estimated_capture_time);
+      native_track()->Capture(*source_bus_, estimated_capture_time);
       sample_frames_captured_ += source_bus_->frames();
     }
   }
@@ -364,13 +353,9 @@

   const media::AudioParameters& sink_params() const { return sink_params_; }

-  MediaStreamAudioTrack* native_track() const {
-    return MediaStreamAudioTrack::From(blink_track_);
-  }
+  WebRtcLocalAudioTrack* native_track() const { return native_track_; }

  private:
-  MockPeerConnectionDependencyFactory mock_dependency_factory_;
-
   // Producer.
   std::unique_ptr<SpeechRecognitionAudioSink> speech_audio_sink_;

@@ -381,8 +366,7 @@
   std::unique_ptr<media::AudioBus> source_bus_;
   media::AudioParameters source_params_;
   media::AudioParameters sink_params_;
-  blink::WebMediaStreamSource blink_source_;
-  blink::WebMediaStreamTrack blink_track_;
+  WebRtcLocalAudioTrack* native_track_;

   base::TimeTicks first_frame_capture_time_;
   int64_t sample_frames_captured_;
|