| Index: content/renderer/speech_recognition_dispatcher.cc
|
| diff --git a/content/renderer/speech_recognition_dispatcher.cc b/content/renderer/speech_recognition_dispatcher.cc
|
| index 178abf4610c286c751047ff46ec252089f2946bb..8e6e43858fafc4252dbf3f54f6f04ede4c0c4109 100644
|
| --- a/content/renderer/speech_recognition_dispatcher.cc
|
| +++ b/content/renderer/speech_recognition_dispatcher.cc
|
| @@ -29,13 +29,13 @@ SpeechRecognitionDispatcher::SpeechRecognitionDispatcher(
|
| RenderViewImpl* render_view)
|
| : RenderViewObserver(render_view),
|
| recognizer_client_(NULL),
|
| - next_id_(1) {
|
| -}
|
| + next_id_(1) { }
|
|
|
| SpeechRecognitionDispatcher::~SpeechRecognitionDispatcher() {
|
| }
|
|
|
| void SpeechRecognitionDispatcher::AbortAllRecognitions() {
|
| + audio_source_provider_.reset();
|
| Send(new SpeechRecognitionHostMsg_AbortAllRequests(
|
| routing_id()));
|
| }
|
| @@ -53,18 +53,42 @@ bool SpeechRecognitionDispatcher::OnMessageReceived(
|
| IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_Ended, OnRecognitionEnded)
|
| IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_ResultRetrieved,
|
| OnResultsRetrieved)
|
| + IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_AudioTrackReady, OnAudioTrackReady)
|
| IPC_MESSAGE_UNHANDLED(handled = false)
|
| IPC_END_MESSAGE_MAP()
|
| return handled;
|
| }
|
|
|
| void SpeechRecognitionDispatcher::start(
|
| + const blink::WebSpeechRecognitionHandle& handle,
|
| + const blink::WebSpeechRecognitionParams& params,
|
| + const blink::WebMediaStreamTrack& audio_track,
|
| + blink::WebSpeechRecognizerClient* recognizer_client) {
|
| +  // Check whether this type of track is allowed by the implemented policy.
|
| + if (SpeechRecognitionAudioSourceProvider::IsSupportedTrack(audio_track)) {
|
| + audio_track_.assign(audio_track);
|
| + } else {
|
| + audio_track_.reset();
|
| +    // Notify the client that the provided track is not supported.
|
| + recognizer_client_->didReceiveError(
|
| + handle,
|
| + WebString("Provided audioTrack is not supported. Ignoring track."),
|
| + WebSpeechRecognizerClient::NotAllowedError);
|
| + }
|
| +
|
| + start(handle, params, recognizer_client);
|
| +}
|
| +
|
| +void SpeechRecognitionDispatcher::start(
|
| const WebSpeechRecognitionHandle& handle,
|
| const WebSpeechRecognitionParams& params,
|
| WebSpeechRecognizerClient* recognizer_client) {
|
| DCHECK(!recognizer_client_ || recognizer_client_ == recognizer_client);
|
| recognizer_client_ = recognizer_client;
|
|
|
| +  // Destroy any previous instance so it is not starved waiting on chunk ACKs.
|
| + audio_source_provider_.reset();
|
| +
|
| SpeechRecognitionHostMsg_StartRequest_Params msg_params;
|
| for (size_t i = 0; i < params.grammars().size(); ++i) {
|
| const WebSpeechGrammar& grammar = params.grammars()[i];
|
| @@ -78,6 +102,8 @@ void SpeechRecognitionDispatcher::start(
|
| msg_params.origin_url = params.origin().toString().utf8();
|
| msg_params.render_view_id = routing_id();
|
| msg_params.request_id = GetOrCreateIDForHandle(handle);
|
| +  // Fall back to the default audio input when the track is not allowed.
|
| + msg_params.using_audio_track = !audio_track_.isNull();
|
| // The handle mapping will be removed in |OnRecognitionEnd|.
|
| Send(new SpeechRecognitionHostMsg_StartRequest(msg_params));
|
| }
|
| @@ -85,6 +111,7 @@ void SpeechRecognitionDispatcher::start(
|
| void SpeechRecognitionDispatcher::stop(
|
| const WebSpeechRecognitionHandle& handle,
|
| WebSpeechRecognizerClient* recognizer_client) {
|
| + audio_source_provider_.reset();
|
| // Ignore a |stop| issued without a matching |start|.
|
| if (recognizer_client_ != recognizer_client || !HandleExists(handle))
|
| return;
|
| @@ -95,6 +122,7 @@ void SpeechRecognitionDispatcher::stop(
|
| void SpeechRecognitionDispatcher::abort(
|
| const WebSpeechRecognitionHandle& handle,
|
| WebSpeechRecognizerClient* recognizer_client) {
|
| + audio_source_provider_.reset();
|
| // Ignore an |abort| issued without a matching |start|.
|
| if (recognizer_client_ != recognizer_client || !HandleExists(handle))
|
| return;
|
| @@ -154,6 +182,7 @@ void SpeechRecognitionDispatcher::OnErrorOccurred(
|
| recognizer_client_->didReceiveNoMatch(GetHandleFromID(request_id),
|
| WebSpeechRecognitionResult());
|
| } else {
|
| + audio_source_provider_.reset();
|
| recognizer_client_->didReceiveError(
|
| GetHandleFromID(request_id),
|
| WebString(), // TODO(primiano): message?
|
| @@ -174,6 +203,7 @@ void SpeechRecognitionDispatcher::OnRecognitionEnded(int request_id) {
|
| // didEnd may call back synchronously to start a new recognition session,
|
| // and we don't want to delete the handle from the map after that happens.
|
| handle_map_.erase(request_id);
|
| + audio_source_provider_.reset();
|
| recognizer_client_->didEnd(handle);
|
| }
|
| }
|
| @@ -211,6 +241,26 @@ void SpeechRecognitionDispatcher::OnResultsRetrieved(
|
| GetHandleFromID(request_id), final, provisional);
|
| }
|
|
|
| +void SpeechRecognitionDispatcher::OnAudioTrackReady(
|
| + int request_id,
|
| + const media::AudioParameters& params,
|
| + base::SharedMemoryHandle memory,
|
| + base::SyncSocket::TransitDescriptor descriptor) {
|
| + DCHECK(!audio_source_provider_.get());
|
| + if (audio_track_.isNull()) {
|
| + audio_source_provider_.reset();
|
| + return;
|
| + }
|
| +
|
| + scoped_ptr<base::SyncSocket> socket;
|
| + socket.reset(
|
| + new base::SyncSocket(base::SyncSocket::UnwrapHandle(descriptor)));
|
| +
|
| + audio_source_provider_.reset(new SpeechRecognitionAudioSourceProvider(
|
| + audio_track_, params, memory, socket.release(),
|
| + base::Bind(&SpeechRecognitionDispatcher::OnAudioTrackStopped,
|
| + base::Unretained(this))));
|
| +}
|
|
|
| int SpeechRecognitionDispatcher::GetOrCreateIDForHandle(
|
| const WebSpeechRecognitionHandle& handle) {
|
| @@ -239,6 +289,10 @@ bool SpeechRecognitionDispatcher::HandleExists(
|
| return false;
|
| }
|
|
|
| +void SpeechRecognitionDispatcher::OnAudioTrackStopped() {
|
| + audio_track_.reset();
|
| +}
|
| +
|
| const WebSpeechRecognitionHandle& SpeechRecognitionDispatcher::GetHandleFromID(
|
| int request_id) {
|
| HandleMap::iterator iter = handle_map_.find(request_id);
|
|
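For reference, the members and handlers introduced in this .cc change imply matching declarations in speech_recognition_dispatcher.h (not shown in the patch). Below is a minimal sketch of what those declarations might look like, using only the names visible above; the exact header layout is an assumption, and existing members are omitted.

// Sketch only: assumed header-side declarations implied by the .cc changes
// above. Not part of the actual patch.
class SpeechRecognitionDispatcher : public RenderViewObserver {
 public:
  // Overload of start() that accepts the audio track supplied by the page.
  void start(const blink::WebSpeechRecognitionHandle& handle,
             const blink::WebSpeechRecognitionParams& params,
             const blink::WebMediaStreamTrack& audio_track,
             blink::WebSpeechRecognizerClient* recognizer_client);

 private:
  // IPC handler: the browser side has prepared shared memory and a sync
  // socket for streaming audio from the renderer-held track.
  void OnAudioTrackReady(int request_id,
                         const media::AudioParameters& params,
                         base::SharedMemoryHandle memory,
                         base::SyncSocket::TransitDescriptor descriptor);

  // Called by |audio_source_provider_| when the audio track stops.
  void OnAudioTrackStopped();

  // Audio track supplied via start(); null when the default microphone
  // input should be used instead.
  blink::WebMediaStreamTrack audio_track_;

  // Bridges audio from |audio_track_| into the shared-memory/sync-socket
  // transport set up in OnAudioTrackReady().
  scoped_ptr<SpeechRecognitionAudioSourceProvider> audio_source_provider_;
};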
|