Chromium Code Reviews

Unified Diff: content/renderer/media/speech_recognition_audio_sink_unittest.cc

Issue 1834323002: MediaStream audio: Refactor 3 separate "glue" implementations into one. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: REBASE + Workaround to ensure MediaStreamAudioProcessor is destroyed on the main thread. Created 4 years, 7 months ago
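[Editor's note, not part of the patch] The patch-set description mentions a workaround to guarantee that MediaStreamAudioProcessor is destroyed on the main thread. As background only, below is a minimal sketch of one common Chromium-style way to provide such a guarantee: instead of deleting in place, the owner hands the object back to the main thread's task runner. The helper name DestroyOnMainThread, the template parameter, and the assumption that a base::SingleThreadTaskRunner for the main thread is at hand are illustrative; this is not the mechanism used by this CL.

#include <memory>

#include "base/location.h"
#include "base/memory/ref_counted.h"
#include "base/single_thread_task_runner.h"

// |object| stands in for something (e.g. an audio processor) whose destructor
// must run on the main thread; |main_task_runner| is assumed to be the main
// thread's runner.
template <typename T>
void DestroyOnMainThread(
    std::unique_ptr<T> object,
    scoped_refptr<base::SingleThreadTaskRunner> main_task_runner) {
  if (main_task_runner->BelongsToCurrentThread()) {
    object.reset();  // Already on the main thread; delete immediately.
    return;
  }
  // DeleteSoon() posts a task that deletes the object on |main_task_runner|,
  // so the destructor runs on the main thread regardless of the caller.
  main_task_runner->DeleteSoon(FROM_HERE, object.release());
}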
Index: content/renderer/media/speech_recognition_audio_sink_unittest.cc
diff --git a/content/renderer/media/speech_recognition_audio_sink_unittest.cc b/content/renderer/media/speech_recognition_audio_sink_unittest.cc
index d337b109b5db6e830f0b29f63837f66a826a9b5c..47639a50febdcd4c9e1658fbce6c55c0115028ca 100644
--- a/content/renderer/media/speech_recognition_audio_sink_unittest.cc
+++ b/content/renderer/media/speech_recognition_audio_sink_unittest.cc
@@ -11,15 +11,15 @@
#include "base/bind.h"
#include "base/macros.h"
-#include "base/strings/utf_string_conversions.h"
#include "content/renderer/media/media_stream_audio_source.h"
-#include "content/renderer/media/webrtc/webrtc_local_audio_track_adapter.h"
-#include "content/renderer/media/webrtc_local_audio_track.h"
+#include "content/renderer/media/media_stream_audio_track.h"
+#include "content/renderer/media/webrtc/mock_peer_connection_dependency_factory.h"
#include "media/base/audio_bus.h"
#include "media/base/audio_parameters.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "third_party/WebKit/public/platform/WebMediaStreamTrack.h"
+#include "third_party/WebKit/public/platform/WebString.h"
#include "third_party/WebKit/public/web/WebHeap.h"
namespace {
@@ -204,11 +204,27 @@ class FakeSpeechRecognizer {
namespace content {
+namespace {
+
+class TestDrivenAudioSource : public MediaStreamAudioSource {
+ public:
+ TestDrivenAudioSource() : MediaStreamAudioSource(true) {}
+ ~TestDrivenAudioSource() final {}
+
+ // Expose protected methods as public for testing.
+ using MediaStreamAudioSource::SetFormat;
+ using MediaStreamAudioSource::DeliverDataToTracks;
+};
+
+} // namespace
+
class SpeechRecognitionAudioSinkTest : public testing::Test {
public:
SpeechRecognitionAudioSinkTest() {}
~SpeechRecognitionAudioSinkTest() {
+ blink_source_.reset();
+ blink_track_.reset();
speech_audio_sink_.reset();
blink::WebHeap::collectAllGarbageForTesting();
}
@@ -238,24 +254,21 @@ class SpeechRecognitionAudioSinkTest : public testing::Test {
sample_frames_captured_ = 0;
// Prepare the track and audio source.
- blink::WebMediaStreamTrack blink_track;
- PrepareBlinkTrackOfType(MEDIA_DEVICE_AUDIO_CAPTURE, &blink_track);
-
- // Get the native track from the blink track and initialize.
- native_track_ =
- static_cast<WebRtcLocalAudioTrack*>(blink_track.getExtraData());
- native_track_->OnSetFormat(source_params_);
+ PrepareBlinkTrackOfType(MEDIA_DEVICE_AUDIO_CAPTURE, &blink_track_);
+ blink_source_ = blink_track_.source();
+ static_cast<TestDrivenAudioSource*>(
+ MediaStreamAudioSource::From(blink_source_))->SetFormat(source_params_);
// Create and initialize the consumer.
recognizer_.reset(new FakeSpeechRecognizer());
base::SharedMemoryHandle foreign_memory_handle;
- recognizer_->Initialize(blink_track, sink_params_, &foreign_memory_handle);
+ recognizer_->Initialize(blink_track_, sink_params_, &foreign_memory_handle);
// Create the producer.
std::unique_ptr<base::SyncSocket> sending_socket(
recognizer_->sending_socket());
speech_audio_sink_.reset(new SpeechRecognitionAudioSink(
- blink_track, sink_params_, foreign_memory_handle,
+ blink_track_, sink_params_, foreign_memory_handle,
std::move(sending_socket),
base::Bind(&SpeechRecognitionAudioSinkTest::StoppedCallback,
base::Unretained(this))));
@@ -272,25 +285,21 @@ class SpeechRecognitionAudioSinkTest : public testing::Test {
protected:
// Prepares a blink track of a given MediaStreamType and attaches the native
// track which can be used to capture audio data and pass it to the producer.
- static void PrepareBlinkTrackOfType(
- const MediaStreamType device_type,
- blink::WebMediaStreamTrack* blink_track) {
- scoped_refptr<WebRtcLocalAudioTrackAdapter> adapter(
- WebRtcLocalAudioTrackAdapter::Create(std::string(), NULL));
- std::unique_ptr<WebRtcLocalAudioTrack> native_track(
- new WebRtcLocalAudioTrack(adapter.get()));
- blink::WebMediaStreamSource blink_audio_source;
- blink_audio_source.initialize(base::UTF8ToUTF16("dummy_source_id"),
- blink::WebMediaStreamSource::TypeAudio,
- base::UTF8ToUTF16("dummy_source_name"),
- false /* remote */);
- MediaStreamSource::SourceStoppedCallback cb;
- blink_audio_source.setExtraData(new MediaStreamAudioSource(
- -1, StreamDeviceInfo(device_type, "Mock device", "mock_device_id"), cb,
- nullptr));
+ void PrepareBlinkTrackOfType(const MediaStreamType device_type,
+ blink::WebMediaStreamTrack* blink_track) {
+ blink::WebMediaStreamSource blink_source;
+ blink_source.initialize(blink::WebString::fromUTF8("dummy_source_id"),
+ blink::WebMediaStreamSource::TypeAudio,
+ blink::WebString::fromUTF8("dummy_source_name"),
+ false /* remote */);
+ TestDrivenAudioSource* const audio_source = new TestDrivenAudioSource();
+ audio_source->SetDeviceInfo(
+ StreamDeviceInfo(device_type, "Mock device", "mock_device_id"));
+ blink_source.setExtraData(audio_source); // Takes ownership.
+
blink_track->initialize(blink::WebString::fromUTF8("dummy_track"),
- blink_audio_source);
- blink_track->setExtraData(native_track.release());
+ blink_source);
+ ASSERT_TRUE(audio_source->ConnectToTrack(*blink_track));
}
// Emulates an audio capture device capturing data from the source.
@@ -299,7 +308,9 @@ class SpeechRecognitionAudioSinkTest : public testing::Test {
const base::TimeTicks estimated_capture_time = first_frame_capture_time_ +
(sample_frames_captured_ * base::TimeDelta::FromSeconds(1) /
source_params_.sample_rate());
- native_track()->Capture(*source_bus_, estimated_capture_time);
+ static_cast<TestDrivenAudioSource*>(
+ MediaStreamAudioSource::From(blink_source_))
+ ->DeliverDataToTracks(*source_bus_, estimated_capture_time);
sample_frames_captured_ += source_bus_->frames();
}
}
@@ -353,9 +364,13 @@ class SpeechRecognitionAudioSinkTest : public testing::Test {
const media::AudioParameters& sink_params() const { return sink_params_; }
- WebRtcLocalAudioTrack* native_track() const { return native_track_; }
+ MediaStreamAudioTrack* native_track() const {
+ return MediaStreamAudioTrack::From(blink_track_);
+ }
private:
+ MockPeerConnectionDependencyFactory mock_dependency_factory_;
+
// Producer.
std::unique_ptr<SpeechRecognitionAudioSink> speech_audio_sink_;
@@ -366,7 +381,8 @@ class SpeechRecognitionAudioSinkTest : public testing::Test {
std::unique_ptr<media::AudioBus> source_bus_;
media::AudioParameters source_params_;
media::AudioParameters sink_params_;
- WebRtcLocalAudioTrack* native_track_;
+ blink::WebMediaStreamSource blink_source_;
+ blink::WebMediaStreamTrack blink_track_;
base::TimeTicks first_frame_capture_time_;
int64_t sample_frames_captured_;
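[Editor's note, not part of the patch] The TestDrivenAudioSource class introduced above uses a small pattern worth calling out: a test-only subclass re-exports protected hooks of the production class via using-declarations so the fixture can set the format and push audio data directly. The sketch below re-creates that pattern with a generic stand-in base class so it compiles on its own; AudioSourceBase, TestDrivenSource, and the frame counts are illustrative and do not come from the patch.

#include <iostream>

// Stand-in for a production class such as MediaStreamAudioSource; the real
// class keeps these hooks protected so that normally only the source itself
// drives them.
class AudioSourceBase {
 protected:
  void SetFormat(int sample_rate) { sample_rate_ = sample_rate; }
  void DeliverData(int frames) {
    std::cout << "delivered " << frames << " frames @ " << sample_rate_
              << " Hz\n";
  }

 private:
  int sample_rate_ = 0;
};

// Test-only subclass: adds no behavior, just widens access for the fixture.
class TestDrivenSource : public AudioSourceBase {
 public:
  using AudioSourceBase::SetFormat;    // callable from test code
  using AudioSourceBase::DeliverData;  // callable from test code
};

int main() {
  TestDrivenSource source;
  source.SetFormat(16000);
  source.DeliverData(320);  // emulate one 20 ms capture callback
  return 0;
}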