Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(1552)

Unified Diff: content/renderer/speech_recognition_dispatcher.cc

Issue 499233003: Binding media stream audio track to speech recognition [renderer] (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Rebase on master - merge fix Created 6 years, 2 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View side-by-side diff with in-line comments
Download patch
Index: content/renderer/speech_recognition_dispatcher.cc
diff --git a/content/renderer/speech_recognition_dispatcher.cc b/content/renderer/speech_recognition_dispatcher.cc
index 178abf4610c286c751047ff46ec252089f2946bb..e5fbfa658dfd9bec96ddde3482392b3a2d140c41 100644
--- a/content/renderer/speech_recognition_dispatcher.cc
+++ b/content/renderer/speech_recognition_dispatcher.cc
@@ -7,6 +7,9 @@
#include "base/basictypes.h"
#include "base/strings/utf_string_conversions.h"
#include "content/common/speech_recognition_messages.h"
+#if defined(ENABLE_WEBRTC)
+#include "content/renderer/media/speech_recognition_audio_sink.h"
+#endif
#include "content/renderer/render_view_impl.h"
#include "third_party/WebKit/public/platform/WebString.h"
#include "third_party/WebKit/public/platform/WebVector.h"
@@ -29,13 +32,12 @@ SpeechRecognitionDispatcher::SpeechRecognitionDispatcher(
RenderViewImpl* render_view)
: RenderViewObserver(render_view),
recognizer_client_(NULL),
- next_id_(1) {
-}
+ next_id_(1) {}
-SpeechRecognitionDispatcher::~SpeechRecognitionDispatcher() {
-}
+SpeechRecognitionDispatcher::~SpeechRecognitionDispatcher() {}
void SpeechRecognitionDispatcher::AbortAllRecognitions() {
+ ResetAudioSink();
Send(new SpeechRecognitionHostMsg_AbortAllRequests(
routing_id()));
}
@@ -53,6 +55,8 @@ bool SpeechRecognitionDispatcher::OnMessageReceived(
IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_Ended, OnRecognitionEnded)
IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_ResultRetrieved,
OnResultsRetrieved)
+ IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_SharedAudioBusReady,
no longer working on chromium 2014/10/09 12:19:02 Is SharedAudioBusReady a suitable name here? I thi… [comment truncated in page capture]
burnik 2014/10/09 13:13:04 Done.
+ OnSharedAudioBusReady)
IPC_MESSAGE_UNHANDLED(handled = false)
IPC_END_MESSAGE_MAP()
return handled;
@@ -65,6 +69,29 @@ void SpeechRecognitionDispatcher::start(
DCHECK(!recognizer_client_ || recognizer_client_ == recognizer_client);
recognizer_client_ = recognizer_client;
+#if defined(ENABLE_WEBRTC)
+ const blink::WebMediaStreamTrack track = params.audioTrack();
+ if (!track.isNull()) {
+ // Check if this type of track is allowed by implemented policy.
+ if (SpeechRecognitionAudioSink::IsSupportedTrack(track)) {
+ audio_track_.assign(track);
+ } else {
+ audio_track_.reset();
+ // Notify user that the track used is not supported.
+ recognizer_client_->didReceiveError(
+ handle,
+ WebString("Provided audioTrack is not supported."),
+ WebSpeechRecognizerClient::AudioCaptureError);
+
+ return;
+ }
+ }
+
+ // Destroy any previous instance to detach from the audio track.
+ // Each new session should reinstantiate the provider once the track is ready.
+ ResetAudioSink();
+#endif
+
SpeechRecognitionHostMsg_StartRequest_Params msg_params;
for (size_t i = 0; i < params.grammars().size(); ++i) {
const WebSpeechGrammar& grammar = params.grammars()[i];
@@ -78,6 +105,12 @@ void SpeechRecognitionDispatcher::start(
msg_params.origin_url = params.origin().toString().utf8();
msg_params.render_view_id = routing_id();
msg_params.request_id = GetOrCreateIDForHandle(handle);
+#if defined(ENABLE_WEBRTC)
+ // fall back to default input when the track is not allowed
no longer working on chromium 2014/10/09 12:19:02 nit, s/fall/Fall/g, and end with period.
burnik 2014/10/09 13:13:04 Done.
+ msg_params.using_audio_track = !audio_track_.isNull();
+#else
+ msg_params.using_audio_track = false;
+#endif
// The handle mapping will be removed in |OnRecognitionEnd|.
Send(new SpeechRecognitionHostMsg_StartRequest(msg_params));
}
@@ -85,6 +118,7 @@ void SpeechRecognitionDispatcher::start(
void SpeechRecognitionDispatcher::stop(
const WebSpeechRecognitionHandle& handle,
WebSpeechRecognizerClient* recognizer_client) {
+ ResetAudioSink();
// Ignore a |stop| issued without a matching |start|.
if (recognizer_client_ != recognizer_client || !HandleExists(handle))
return;
@@ -95,6 +129,7 @@ void SpeechRecognitionDispatcher::stop(
void SpeechRecognitionDispatcher::abort(
const WebSpeechRecognitionHandle& handle,
WebSpeechRecognizerClient* recognizer_client) {
+ ResetAudioSink();
// Ignore an |abort| issued without a matching |start|.
if (recognizer_client_ != recognizer_client || !HandleExists(handle))
return;
@@ -154,6 +189,7 @@ void SpeechRecognitionDispatcher::OnErrorOccurred(
recognizer_client_->didReceiveNoMatch(GetHandleFromID(request_id),
WebSpeechRecognitionResult());
} else {
+ ResetAudioSink();
recognizer_client_->didReceiveError(
GetHandleFromID(request_id),
WebString(), // TODO(primiano): message?
@@ -174,6 +210,7 @@ void SpeechRecognitionDispatcher::OnRecognitionEnded(int request_id) {
// didEnd may call back synchronously to start a new recognition session,
// and we don't want to delete the handle from the map after that happens.
handle_map_.erase(request_id);
+ ResetAudioSink();
recognizer_client_->didEnd(handle);
}
}
@@ -211,6 +248,28 @@ void SpeechRecognitionDispatcher::OnResultsRetrieved(
GetHandleFromID(request_id), final, provisional);
}
+void SpeechRecognitionDispatcher::OnSharedAudioBusReady(
+ int request_id,
+ const media::AudioParameters& params,
+ const base::SharedMemoryHandle memory,
+ const base::SyncSocket::TransitDescriptor descriptor) {
+#if defined(ENABLE_WEBRTC)
+ DCHECK(!speech_audio_sink_.get());
+ if (audio_track_.isNull()) {
+ speech_audio_sink_.reset();
no longer working on chromium 2014/10/09 12:19:02 call ResetAudioSink() instead
burnik 2014/10/09 13:13:03 Done.
+ return;
+ }
+
+ // Create socket here and pass ownership to the |speech_audio_sink_|.
+ scoped_ptr<base::SyncSocket> socket(new base::CancelableSyncSocket(
+ base::SyncSocket::UnwrapHandle(descriptor)));
+
+ speech_audio_sink_.reset(new SpeechRecognitionAudioSink(
+ audio_track_, params, memory, socket.Pass(),
+ base::Bind(&SpeechRecognitionDispatcher::ResetAudioSink,
+ base::Unretained(this))));
+#endif
+}
int SpeechRecognitionDispatcher::GetOrCreateIDForHandle(
const WebSpeechRecognitionHandle& handle) {
@@ -239,6 +298,12 @@ bool SpeechRecognitionDispatcher::HandleExists(
return false;
}
+void SpeechRecognitionDispatcher::ResetAudioSink() {
+#if defined(ENABLE_WEBRTC)
+ speech_audio_sink_.reset();
+#endif
+}
+
const WebSpeechRecognitionHandle& SpeechRecognitionDispatcher::GetHandleFromID(
int request_id) {
HandleMap::iterator iter = handle_map_.find(request_id);
« content/content_renderer.gypi ('K') | « content/renderer/speech_recognition_dispatcher.h ('k') | no next file » | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698