Chromium Code Reviews

Index: content/renderer/speech_recognition_dispatcher.cc
diff --git a/content/renderer/speech_recognition_dispatcher.cc b/content/renderer/speech_recognition_dispatcher.cc
index 178abf4610c286c751047ff46ec252089f2946bb..9dc1a4108c52fa2cf3635b4919d69a2fdac91d18 100644
--- a/content/renderer/speech_recognition_dispatcher.cc
+++ b/content/renderer/speech_recognition_dispatcher.cc
@@ -29,13 +29,15 @@ SpeechRecognitionDispatcher::SpeechRecognitionDispatcher(
     RenderViewImpl* render_view)
     : RenderViewObserver(render_view),
       recognizer_client_(NULL),
-      next_id_(1) {
-}
+      audio_track_set_(false),
+      is_allowed_audio_track_(false),
+      next_id_(1) {}

 SpeechRecognitionDispatcher::~SpeechRecognitionDispatcher() {
 }

 void SpeechRecognitionDispatcher::AbortAllRecognitions() {
+  audio_source_provider_.reset();
   Send(new SpeechRecognitionHostMsg_AbortAllRequests(
       routing_id()));
 }
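This hunk initializes members that the .cc diff never declares. A minimal sketch of what speech_recognition_dispatcher.h presumably gains in this change; the two bools and |audio_track_| are taken from the diff, while the scoped_ptr member is inferred from the reset() calls throughout this file:

// Sketch of the assumed header additions (speech_recognition_dispatcher.h);
// not part of this diff.
scoped_ptr<SpeechRecognitionAudioSourceProvider> audio_source_provider_;
blink::WebMediaStreamTrack audio_track_;
bool audio_track_set_;
bool is_allowed_audio_track_;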
@@ -53,11 +55,29 @@ bool SpeechRecognitionDispatcher::OnMessageReceived(
     IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_Ended, OnRecognitionEnded)
     IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_ResultRetrieved,
                         OnResultsRetrieved)
+    IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_AudioTrackReady, OnAudioTrackReady)
     IPC_MESSAGE_UNHANDLED(handled = false)
   IPC_END_MESSAGE_MAP()
   return handled;
 }
+void SpeechRecognitionDispatcher::attach(
+    const blink::WebSpeechRecognitionHandle& handle,
+    const blink::WebMediaStreamTrack& audio_track,
+    blink::WebSpeechRecognizerClient* recognizer_client) {
+  // Check whether the track comes from an allowed source (microphone only,
+  // for now).
+  is_allowed_audio_track_ =
+      SpeechRecognitionAudioSourceProvider::IsAllowedAudioTrack(audio_track);
+  audio_track_ = audio_track;
+  audio_track_set_ = true;
+}
+
+void SpeechRecognitionDispatcher::detach(
+    const blink::WebSpeechRecognitionHandle& handle,
+    blink::WebSpeechRecognizerClient* recognizer_client) {
+  audio_track_set_ = false;
+}
+
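attach() delegates the policy check to SpeechRecognitionAudioSourceProvider::IsAllowedAudioTrack, defined elsewhere. A hypothetical sketch of the "microphone only for now" check, assuming the Blink media-stream accessors available at this revision:

// Hypothetical sketch; the real check is implemented in
// SpeechRecognitionAudioSourceProvider, not in this diff.
bool SpeechRecognitionAudioSourceProvider::IsAllowedAudioTrack(
    const blink::WebMediaStreamTrack& track) {
  if (track.isNull())
    return false;
  // Accept only audio tracks; a stricter check would also require the
  // backing source device to be a local microphone.
  return track.source().type() == blink::WebMediaStreamSource::TypeAudio;
}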
 void SpeechRecognitionDispatcher::start(
     const WebSpeechRecognitionHandle& handle,
     const WebSpeechRecognitionParams& params,
@@ -65,6 +85,17 @@ void SpeechRecognitionDispatcher::start(
   DCHECK(!recognizer_client_ || recognizer_client_ == recognizer_client);
   recognizer_client_ = recognizer_client;

+  // Destroy any previous instance so it is not left starving while it
+  // waits on chunk ACKs.
+  audio_source_provider_.reset();
+
+  if (audio_track_set_ && !is_allowed_audio_track_) {
+    // Notify the user that the provided track is not supported.
+    recognizer_client_->didReceiveError(
+        handle,
+        WebString("Provided audioTrack is not supported. Ignoring track."),
+        WebSpeechRecognizerClient::NotAllowedError);
+  }
+
   SpeechRecognitionHostMsg_StartRequest_Params msg_params;
   for (size_t i = 0; i < params.grammars().size(); ++i) {
     const WebSpeechGrammar& grammar = params.grammars()[i];
@@ -78,6 +109,8 @@ void SpeechRecognitionDispatcher::start(
   msg_params.origin_url = params.origin().toString().utf8();
   msg_params.render_view_id = routing_id();
   msg_params.request_id = GetOrCreateIDForHandle(handle);
+  // Fall back to the default audio input when the track is not allowed.
+  msg_params.using_audio_track = (audio_track_set_ && is_allowed_audio_track_);
   // The handle mapping will be removed in |OnRecognitionEnd|.
   Send(new SpeechRecognitionHostMsg_StartRequest(msg_params));
 }
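The new using_audio_track flag implies a companion change to the start-request param struct that this diff does not include. A sketch of the assumed addition in speech_recognition_messages.h:

// Assumed companion change (not in this diff): the start-request params
// gain a flag telling the browser to expect renderer-provided audio.
IPC_STRUCT_BEGIN(SpeechRecognitionHostMsg_StartRequest_Params)
  // ... existing members (grammars, language, origin_url, request_id,
  // render_view_id, etc.) ...
  IPC_STRUCT_MEMBER(bool, using_audio_track)
IPC_STRUCT_END()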
@@ -85,6 +118,7 @@ void SpeechRecognitionDispatcher::start(
 void SpeechRecognitionDispatcher::stop(
     const WebSpeechRecognitionHandle& handle,
     WebSpeechRecognizerClient* recognizer_client) {
+  audio_source_provider_.reset();
   // Ignore a |stop| issued without a matching |start|.
   if (recognizer_client_ != recognizer_client || !HandleExists(handle))
     return;
@@ -95,6 +129,7 @@ void SpeechRecognitionDispatcher::stop(
 void SpeechRecognitionDispatcher::abort(
     const WebSpeechRecognitionHandle& handle,
     WebSpeechRecognizerClient* recognizer_client) {
+  audio_source_provider_.reset();
   // Ignore an |abort| issued without a matching |start|.
   if (recognizer_client_ != recognizer_client || !HandleExists(handle))
     return;
@@ -154,6 +189,7 @@ void SpeechRecognitionDispatcher::OnErrorOccurred(
     recognizer_client_->didReceiveNoMatch(GetHandleFromID(request_id),
                                           WebSpeechRecognitionResult());
   } else {
+    audio_source_provider_.reset();
     recognizer_client_->didReceiveError(
         GetHandleFromID(request_id),
         WebString(),  // TODO(primiano): message?
@@ -174,6 +210,7 @@ void SpeechRecognitionDispatcher::OnRecognitionEnded(int request_id) {
     // didEnd may call back synchronously to start a new recognition session,
     // and we don't want to delete the handle from the map after that happens.
     handle_map_.erase(request_id);
+    audio_source_provider_.reset();
     recognizer_client_->didEnd(handle);
   }
 }
@@ -211,6 +248,26 @@ void SpeechRecognitionDispatcher::OnResultsRetrieved(
       GetHandleFromID(request_id), final, provisional);
 }

+// TODO(burnik): Each param on its own line.
+void SpeechRecognitionDispatcher::OnAudioTrackReady(
+    int request_id, const media::AudioParameters& params,
+    base::SharedMemoryHandle memory,
+    base::SyncSocket::TransitDescriptor descriptor) {
+  DCHECK(!audio_source_provider_.get());
+  if (audio_track_.isNull()) {
+    audio_source_provider_.reset();
+    return;
+  }
+
+  scoped_ptr<base::SyncSocket> socket(
+      new base::SyncSocket(base::SyncSocket::UnwrapHandle(descriptor)));
+
+  audio_source_provider_.reset(new SpeechRecognitionAudioSourceProvider(
+      audio_track_, params, memory, socket.release(),
+      base::Bind(&SpeechRecognitionDispatcher::OnAudioTrackError,
+                 base::Unretained(this))));
+}
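OnAudioTrackReady wires a shared-memory region and a SyncSocket into the provider. Conceptually, the provider copies each captured chunk into the shared buffer and uses the socket as a per-chunk handshake with the browser, which is why start() must reset() any stale provider. A simplified sketch of that idea; the names and layout are assumptions, not the actual SpeechRecognitionAudioSourceProvider code:

// Simplified sketch of the renderer-side chunk handshake (assumption).
struct AudioChunkSender {
  void* shared_buffer;       // Mapped from the base::SharedMemoryHandle.
  base::SyncSocket* socket;  // Peer endpoint lives in the browser process.
  uint32_t chunks_sent;

  void OnCapturedAudio(const int16_t* samples, size_t frames) {
    // Copy the chunk into the shared-memory region the browser also maps.
    memcpy(shared_buffer, samples, frames * sizeof(int16_t));
    // Signal the browser over the socket; the browser ACKs each chunk,
    // keeping both sides in lock step.
    uint32_t index = ++chunks_sent;
    socket->Send(&index, sizeof(index));
  }
};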

 int SpeechRecognitionDispatcher::GetOrCreateIDForHandle(
     const WebSpeechRecognitionHandle& handle) {
@@ -239,6 +296,25 @@ bool SpeechRecognitionDispatcher::HandleExists(
   return false;
 }

[Inline review comments on OnAudioTrackError]
burnik, 2014/09/12 12:09:12: Clearly not useful yet. Consider it a stub.
burnik, 2014/09/15 15:00:07: Refactored stub - just used for detecting a stop.

+void SpeechRecognitionDispatcher::OnAudioTrackError(
+    SpeechRecognitionAudioSourceProvider::ErrorState error) {
+  // TODO(burnik): handle error state events.
+  switch (error) {
+    case SpeechRecognitionAudioSourceProvider::ErrorState::SEND_FAILED:
+      DLOG(ERROR) << "SEND_FAILED";
+      break;
+    case SpeechRecognitionAudioSourceProvider::ErrorState::BUFFER_SYNC_LAG:
+      DLOG(ERROR) << "BUFFER_SYNC_LAG";
+      break;
+    case SpeechRecognitionAudioSourceProvider::ErrorState::AUDIO_FIFO_OVERFLOW:
+      DLOG(ERROR) << "AUDIO_FIFO_OVERFLOW";
+      break;
+    case SpeechRecognitionAudioSourceProvider::ErrorState::TRACK_STOPPED:
+      DLOG(ERROR) << "TRACK_STOPPED";
+      break;
+  }
+}
+
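Since each case only logs its own name, one possible tidy-up for the TODO above (an assumption, not part of this change) is an enum-to-string helper so the switch collapses into a single DLOG(ERROR) << ErrorStateToString(error):

// Possible tidy-up (assumption): map each ErrorState to its name once.
const char* ErrorStateToString(
    SpeechRecognitionAudioSourceProvider::ErrorState error) {
  switch (error) {
    case SpeechRecognitionAudioSourceProvider::ErrorState::SEND_FAILED:
      return "SEND_FAILED";
    case SpeechRecognitionAudioSourceProvider::ErrorState::BUFFER_SYNC_LAG:
      return "BUFFER_SYNC_LAG";
    case SpeechRecognitionAudioSourceProvider::ErrorState::AUDIO_FIFO_OVERFLOW:
      return "AUDIO_FIFO_OVERFLOW";
    case SpeechRecognitionAudioSourceProvider::ErrorState::TRACK_STOPPED:
      return "TRACK_STOPPED";
  }
  return "UNKNOWN_ERROR_STATE";
}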
 const WebSpeechRecognitionHandle& SpeechRecognitionDispatcher::GetHandleFromID(
     int request_id) {
   HandleMap::iterator iter = handle_map_.find(request_id);