Index: content/renderer/media/speech_recognition_audio_sink_unittest.cc
diff --git a/content/renderer/media/speech_recognition_audio_sink_unittest.cc b/content/renderer/media/speech_recognition_audio_sink_unittest.cc
index 64ee4348f10d53570d6f09f0271141479d168596..c88ef61cb8708d2eb4ca996f46c389bc30a5c08f 100644
--- a/content/renderer/media/speech_recognition_audio_sink_unittest.cc
+++ b/content/renderer/media/speech_recognition_audio_sink_unittest.cc
@@ -12,10 +12,10 @@
#include "base/bind.h"
#include "base/macros.h"
#include "base/strings/utf_string_conversions.h"
-#include "content/renderer/media/media_stream_audio_source.h"
+#include "content/renderer/media/media_stream_audio_track.h"
#include "content/renderer/media/mock_media_constraint_factory.h"
-#include "content/renderer/media/webrtc/webrtc_local_audio_track_adapter.h"
-#include "content/renderer/media/webrtc_local_audio_track.h"
+#include "content/renderer/media/webrtc/mock_peer_connection_dependency_factory.h"
+#include "content/renderer/media/webrtc/processed_local_audio_source.h"
#include "media/audio/audio_parameters.h"
#include "media/base/audio_bus.h"
#include "testing/gmock/include/gmock/gmock.h"
@@ -243,9 +243,8 @@ class SpeechRecognitionAudioSinkTest : public testing::Test {
PrepareBlinkTrackOfType(MEDIA_DEVICE_AUDIO_CAPTURE, &blink_track);
// Get the native track from the blink track and initialize.
-    native_track_ =
-        static_cast<WebRtcLocalAudioTrack*>(blink_track.extraData());
-    native_track_->OnSetFormat(source_params_);
+    native_track_ = MediaStreamAudioTrack::Get(blink_track);
+    native_track_->SetFormat(source_params_);
// Create and initialize the consumer.
recognizer_.reset(new FakeSpeechRecognizer());
@@ -272,32 +271,26 @@ class SpeechRecognitionAudioSinkTest : public testing::Test {
protected:
// Prepares a blink track of a given MediaStreamType and attaches the native
// track which can be used to capture audio data and pass it to the producer.
-  static void PrepareBlinkTrackOfType(
-      const MediaStreamType device_type,
-      blink::WebMediaStreamTrack* blink_track) {
-    StreamDeviceInfo device_info(device_type, "Mock device",
-                                 "mock_device_id");
-    MockMediaConstraintFactory constraint_factory;
-    const blink::WebMediaConstraints constraints =
-        constraint_factory.CreateWebMediaConstraints();
-    scoped_refptr<WebRtcAudioCapturer> capturer(
-        WebRtcAudioCapturer::CreateCapturer(-1, device_info, constraints, NULL,
-                                            NULL));
-    scoped_refptr<WebRtcLocalAudioTrackAdapter> adapter(
-        WebRtcLocalAudioTrackAdapter::Create(std::string(), NULL));
-    scoped_ptr<WebRtcLocalAudioTrack> native_track(
-        new WebRtcLocalAudioTrack(adapter.get(), capturer, NULL));
+  void PrepareBlinkTrackOfType(const MediaStreamType device_type,
+                               blink::WebMediaStreamTrack* blink_track) {
blink::WebMediaStreamSource blink_audio_source;
blink_audio_source.initialize(base::UTF8ToUTF16("dummy_source_id"),
blink::WebMediaStreamSource::TypeAudio,
base::UTF8ToUTF16("dummy_source_name"),
false /* remote */, true /* readonly */);
-    MediaStreamSource::SourceStoppedCallback cb;
-    blink_audio_source.setExtraData(
-        new MediaStreamAudioSource(-1, device_info, cb, NULL));
+    ProcessedLocalAudioSource* const audio_source =
+        new ProcessedLocalAudioSource(
+            -1 /* consumer_render_frame_id is N/A for non-browser tests */,
+            StreamDeviceInfo(device_type, "Mock device", "mock_device_id"),
+            &mock_dependency_factory_);
+    audio_source->SetAllowInvalidRenderFrameIdForTesting(true);
+    audio_source->SetSourceConstraints(
+        MockMediaConstraintFactory().CreateWebMediaConstraints());
+    blink_audio_source.setExtraData(audio_source);  // Takes ownership.
+
blink_track->initialize(blink::WebString::fromUTF8("dummy_track"),
blink_audio_source);
-    blink_track->setExtraData(native_track.release());
+    ASSERT_TRUE(audio_source->ConnectToTrack(*blink_track));
}
// Emulates an audio capture device capturing data from the source.
@@ -306,7 +299,7 @@ class SpeechRecognitionAudioSinkTest : public testing::Test {
const base::TimeTicks estimated_capture_time = first_frame_capture_time_ +
(sample_frames_captured_ * base::TimeDelta::FromSeconds(1) /
source_params_.sample_rate());
-      native_track()->Capture(*source_bus_, estimated_capture_time, false);
+      native_track()->DeliverDataToSinks(*source_bus_, estimated_capture_time);
sample_frames_captured_ += source_bus_->frames();
}
}
@@ -360,9 +353,11 @@ class SpeechRecognitionAudioSinkTest : public testing::Test {
const media::AudioParameters& sink_params() const { return sink_params_; }
-  WebRtcLocalAudioTrack* native_track() const { return native_track_; }
+  MediaStreamAudioTrack* native_track() const { return native_track_; }
private:
+  MockPeerConnectionDependencyFactory mock_dependency_factory_;
+
// Producer.
scoped_ptr<SpeechRecognitionAudioSink> speech_audio_sink_;
@@ -373,7 +368,7 @@ class SpeechRecognitionAudioSinkTest : public testing::Test {
scoped_ptr<media::AudioBus> source_bus_;
media::AudioParameters source_params_;
media::AudioParameters sink_params_;
-  WebRtcLocalAudioTrack* native_track_;
+  MediaStreamAudioTrack* native_track_;
base::TimeTicks first_frame_capture_time_;
int64_t sample_frames_captured_;