Index: content/renderer/speech_recognition_dispatcher.cc
diff --git a/content/renderer/speech_recognition_dispatcher.cc b/content/renderer/speech_recognition_dispatcher.cc
index 178abf4610c286c751047ff46ec252089f2946bb..7354133d2aa62703de20b895d01d71ec5da8f930 100644
--- a/content/renderer/speech_recognition_dispatcher.cc
+++ b/content/renderer/speech_recognition_dispatcher.cc
@@ -7,6 +7,7 @@
 #include "base/basictypes.h"
 #include "base/strings/utf_string_conversions.h"
 #include "content/common/speech_recognition_messages.h"
+#include "content/renderer/media/speech_recognition_audio_sink.h"
 #include "content/renderer/render_view_impl.h"
 #include "third_party/WebKit/public/platform/WebString.h"
 #include "third_party/WebKit/public/platform/WebVector.h"
@@ -29,13 +30,12 @@ SpeechRecognitionDispatcher::SpeechRecognitionDispatcher(
     RenderViewImpl* render_view)
     : RenderViewObserver(render_view),
       recognizer_client_(NULL),
-      next_id_(1) {
-}
+      next_id_(1) {}
 
-SpeechRecognitionDispatcher::~SpeechRecognitionDispatcher() {
-}
+SpeechRecognitionDispatcher::~SpeechRecognitionDispatcher() {}
 
 void SpeechRecognitionDispatcher::AbortAllRecognitions() {
+  speech_audio_sink_.reset();
   Send(new SpeechRecognitionHostMsg_AbortAllRequests(
       routing_id()));
 }
@@ -53,6 +53,7 @@ bool SpeechRecognitionDispatcher::OnMessageReceived(
     IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_Ended, OnRecognitionEnded)
     IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_ResultRetrieved,
                         OnResultsRetrieved)
+    IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_AudioTrackReady, OnAudioTrackReady)
     IPC_MESSAGE_UNHANDLED(handled = false)
   IPC_END_MESSAGE_MAP()
   return handled;
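// A sketch (not part of this patch) of the declaration that the new
// IPC_MESSAGE_HANDLER entry implies for speech_recognition_dispatcher.h; the
// header change is not shown in this diff, so placement and exact form are
// assumptions. The signature mirrors the definition added further below.
void OnAudioTrackReady(int request_id,
                       const media::AudioParameters& params,
                       base::SharedMemoryHandle memory,
                       base::SyncSocket::TransitDescriptor descriptor);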
@@ -65,6 +66,27 @@ void SpeechRecognitionDispatcher::start(
   DCHECK(!recognizer_client_ || recognizer_client_ == recognizer_client);
   recognizer_client_ = recognizer_client;
 
+  const blink::WebMediaStreamTrack track = params.audioTrack();
+  if (!track.isNull()) {
+    // Check whether this type of track is allowed by the implemented policy.
+    if (SpeechRecognitionAudioSink::IsSupportedTrack(track)) {
+      audio_track_.assign(track);
+    } else {
+      audio_track_.reset();
+      // Notify the user that the provided track is not supported.
+      recognizer_client_->didReceiveError(
+          handle,
+          WebString("Provided audioTrack is not supported."),
+          WebSpeechRecognizerClient::AudioCaptureError);
+
+      return;
+    }
+  }
+
+  // Destroy any previous instance to detach it from the audio track. Each
+  // new session should reinstantiate the provider once the track is ready.
+  speech_audio_sink_.reset();
+
   SpeechRecognitionHostMsg_StartRequest_Params msg_params;
   for (size_t i = 0; i < params.grammars().size(); ++i) {
     const WebSpeechGrammar& grammar = params.grammars()[i];
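// For context: a plausible shape of SpeechRecognitionAudioSink::
// IsSupportedTrack(), which the hunk above relies on. The real policy lives
// in speech_recognition_audio_sink.cc and is not part of this diff; this
// sketch assumes the policy is "locally captured microphone tracks only" and
// that the native MediaStreamAudioSource is reachable via the source's
// extraData(), so treat every detail below as an approximation.
bool SpeechRecognitionAudioSink::IsSupportedTrack(
    const blink::WebMediaStreamTrack& track) {
  // Reject non-audio tracks outright.
  if (track.source().type() != blink::WebMediaStreamSource::TypeAudio)
    return false;
  MediaStreamAudioSource* native_source =
      static_cast<MediaStreamAudioSource*>(track.source().extraData());
  if (!native_source)
    return false;
  // Hypothetical policy: only tracks backed by a real capture device (not
  // WebAudio or remote sources) may feed the recognizer.
  return native_source->device_info().device.type ==
         MEDIA_DEVICE_AUDIO_CAPTURE;
}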
@@ -78,6 +100,8 @@ void SpeechRecognitionDispatcher::start(
   msg_params.origin_url = params.origin().toString().utf8();
   msg_params.render_view_id = routing_id();
   msg_params.request_id = GetOrCreateIDForHandle(handle);
+  // Fall back to the default audio input when no audio track is used.
+  msg_params.using_audio_track = !audio_track_.isNull();
   // The handle mapping will be removed in |OnRecognitionEnd|.
   Send(new SpeechRecognitionHostMsg_StartRequest(msg_params));
 }
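// Setting |using_audio_track| presupposes a matching field on the IPC struct
// in content/common/speech_recognition_messages.h, which this diff does not
// show. A sketch of the assumed addition inside the existing struct:
IPC_STRUCT_BEGIN(SpeechRecognitionHostMsg_StartRequest_Params)
  // ... existing members (grammars, origin_url, render_view_id, ...) ...
  // Whether the session should consume audio from a renderer-provided
  // MediaStreamTrack instead of the default microphone.
  IPC_STRUCT_MEMBER(bool, using_audio_track)
IPC_STRUCT_END()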
@@ -85,6 +109,7 @@ void SpeechRecognitionDispatcher::start(
 void SpeechRecognitionDispatcher::stop(
     const WebSpeechRecognitionHandle& handle,
     WebSpeechRecognizerClient* recognizer_client) {
+  speech_audio_sink_.reset();
   // Ignore a |stop| issued without a matching |start|.
   if (recognizer_client_ != recognizer_client || !HandleExists(handle))
     return;
@@ -95,6 +120,7 @@ void SpeechRecognitionDispatcher::stop(
 void SpeechRecognitionDispatcher::abort(
     const WebSpeechRecognitionHandle& handle,
     WebSpeechRecognizerClient* recognizer_client) {
+  speech_audio_sink_.reset();
   // Ignore an |abort| issued without a matching |start|.
   if (recognizer_client_ != recognizer_client || !HandleExists(handle))
     return;
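// The sink resets above and below use two members that the accompanying
// header change presumably introduces in speech_recognition_dispatcher.h.
// These declarations are inferred from usage in this file (scoped_ptr from
// reset()/get(), WebMediaStreamTrack from assign()/isNull()):
// Audio track providing input for the current session, if one was given.
blink::WebMediaStreamTrack audio_track_;
// Owns the renderer-side audio forwarding; reset() destroys the sink and
// thereby detaches it from |audio_track_|.
scoped_ptr<SpeechRecognitionAudioSink> speech_audio_sink_;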
@@ -154,6 +180,7 @@ void SpeechRecognitionDispatcher::OnErrorOccurred(
     recognizer_client_->didReceiveNoMatch(GetHandleFromID(request_id),
                                           WebSpeechRecognitionResult());
   } else {
+    speech_audio_sink_.reset();
     recognizer_client_->didReceiveError(
         GetHandleFromID(request_id),
         WebString(),  // TODO(primiano): message?
@@ -174,6 +201,7 @@ void SpeechRecognitionDispatcher::OnRecognitionEnded(int request_id) {
     // didEnd may call back synchronously to start a new recognition session,
     // and we don't want to delete the handle from the map after that happens.
     handle_map_.erase(request_id);
+    speech_audio_sink_.reset();
     recognizer_client_->didEnd(handle);
   }
 }
@@ -211,6 +239,26 @@ void SpeechRecognitionDispatcher::OnResultsRetrieved(
       GetHandleFromID(request_id), final, provisional);
 }
 
+void SpeechRecognitionDispatcher::OnAudioTrackReady(
+    int request_id,
+    const media::AudioParameters& params,
+    base::SharedMemoryHandle memory,
+    base::SyncSocket::TransitDescriptor descriptor) {
+  DCHECK(!speech_audio_sink_.get());
+  if (audio_track_.isNull()) {
+    speech_audio_sink_.reset();
+    return;
+  }
+
+  // Create the socket here and pass its ownership to |speech_audio_sink_|.
+  scoped_ptr<base::SyncSocket> socket(new base::CancelableSyncSocket(
+      base::SyncSocket::UnwrapHandle(descriptor)));
+
+  speech_audio_sink_.reset(new SpeechRecognitionAudioSink(
+      audio_track_, params, memory, socket.Pass(),
+      base::Bind(&SpeechRecognitionDispatcher::ResetAudioSink,
+                 base::Unretained(this))));
+}
 
 int SpeechRecognitionDispatcher::GetOrCreateIDForHandle(
     const WebSpeechRecognitionHandle& handle) {
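// A sketch of the SpeechRecognitionAudioSink constructor that the call above
// assumes (declared in content/renderer/media/speech_recognition_audio_sink.h,
// not shown here). The parameter types are inferred from the call site, and
// the last parameter is assumed to be a plain base::Closure since the bound
// ResetAudioSink() takes no arguments; parameter names are illustrative.
SpeechRecognitionAudioSink(const blink::WebMediaStreamTrack& track,
                           const media::AudioParameters& params,
                           const base::SharedMemoryHandle memory,
                           scoped_ptr<base::SyncSocket> socket,
                           const base::Closure& on_stopped_cb);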
@@ -239,6 +287,10 @@ bool SpeechRecognitionDispatcher::HandleExists(
   return false;
 }
 
+void SpeechRecognitionDispatcher::ResetAudioSink() {
+  speech_audio_sink_.reset();
+}
+
 const WebSpeechRecognitionHandle& SpeechRecognitionDispatcher::GetHandleFromID(
     int request_id) {
   HandleMap::iterator iter = handle_map_.find(request_id);
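// Why base::Unretained(this) is reasonable in OnAudioTrackReady(): the
// dispatcher owns |speech_audio_sink_|, so the sink, and the callback it
// holds, cannot outlive |this| as long as the callback only runs while the
// sink exists. A minimal, hypothetical illustration of that ownership
// pattern (|Sink| is a stand-in for SpeechRecognitionAudioSink):
class Sink {
 public:
  explicit Sink(const base::Closure& on_stopped) : on_stopped_(on_stopped) {}

 private:
  base::Closure on_stopped_;  // Run when audio delivery stops.
};

class Owner {
 public:
  Owner()
      : sink_(new Sink(
            base::Bind(&Owner::OnSinkStopped, base::Unretained(this)))) {}

 private:
  void OnSinkStopped() { sink_.reset(); }

  // Destroyed together with Owner, so the bound callback cannot outlive
  // |this|.
  scoped_ptr<Sink> sink_;
};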
|