Chromium Code Reviews

Index: content/renderer/speech_recognition_dispatcher.cc
diff --git a/content/renderer/speech_recognition_dispatcher.cc b/content/renderer/speech_recognition_dispatcher.cc
index 178abf4610c286c751047ff46ec252089f2946bb..563f4713dbc803d463ff6f9f74cf1f733c28f2cb 100644
--- a/content/renderer/speech_recognition_dispatcher.cc
+++ b/content/renderer/speech_recognition_dispatcher.cc
@@ -29,13 +29,13 @@ SpeechRecognitionDispatcher::SpeechRecognitionDispatcher(
     RenderViewImpl* render_view)
     : RenderViewObserver(render_view),
       recognizer_client_(NULL),
-      next_id_(1) {
-}
+      next_id_(1) { }

 SpeechRecognitionDispatcher::~SpeechRecognitionDispatcher() {
 }

 void SpeechRecognitionDispatcher::AbortAllRecognitions() {
+  audio_source_provider_.reset();
   Send(new SpeechRecognitionHostMsg_AbortAllRequests(
       routing_id()));
 }
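Taken together with the matching reset() calls added below in stop(), abort(), OnErrorOccurred() and OnRecognitionEnded(), the apparent invariant of this patch is that |audio_source_provider_| stays alive only while a recognition session is actively consuming the supplied audio track.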
@@ -53,6 +53,7 @@ bool SpeechRecognitionDispatcher::OnMessageReceived(
     IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_Ended, OnRecognitionEnded)
     IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_ResultRetrieved,
                         OnResultsRetrieved)
+    IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_AudioTrackReady, OnAudioTrackReady)
     IPC_MESSAGE_UNHANDLED(handled = false)
   IPC_END_MESSAGE_MAP()
   return handled;
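The payload of the new routed message can be read off the OnAudioTrackReady() handler added further down. A sketch of what the companion definition in speech_recognition_messages.h presumably looks like; that file's diff is not shown here, so every detail below is inferred rather than quoted:

    // Inferred sketch, not the reviewed change: one routed message per
    // started request, handing the renderer the shared-memory buffer and
    // the socket transit descriptor for the audio transport.
    IPC_MESSAGE_ROUTED4(SpeechRecognitionMsg_AudioTrackReady,
                        int /* request_id */,
                        media::AudioParameters /* params */,
                        base::SharedMemoryHandle /* memory */,
                        base::SyncSocket::TransitDescriptor /* descriptor */)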
@@ -65,6 +66,24 @@ void SpeechRecognitionDispatcher::start(
   DCHECK(!recognizer_client_ || recognizer_client_ == recognizer_client);
   recognizer_client_ = recognizer_client;

+  const blink::WebMediaStreamTrack track = params.audioTrack();
+  if (!track.isNull()) {
+    // Check whether this type of track is allowed by the implemented policy.
+    if (SpeechRecognitionAudioSourceProvider::IsSupportedTrack(track)) {
+      audio_track_.assign(track);
+    } else {
+      audio_track_.reset();
+      // Notify the client that the provided track is not supported.
+      recognizer_client_->didReceiveError(
+          handle,
+          WebString("Provided audioTrack is not supported. Ignoring track."),
+          WebSpeechRecognizerClient::NotAllowedError);
  no longer working on chromium (2014/09/23 10:09:13):
    return here, since we are failing the start()
  burnik (2014/09/23 12:39:21):
    Done.
+    }
+  }
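Given the reviewer's note above and the author's "Done.", the landed version presumably fails start() at this point instead of falling through with the track ignored. A minimal sketch of that shape, using only names already visible in this hunk:

    // Sketch of the post-review control flow, not the verbatim patch:
    // reject an unsupported track and fail the start() outright.
    if (!track.isNull()) {
      if (!SpeechRecognitionAudioSourceProvider::IsSupportedTrack(track)) {
        recognizer_client_->didReceiveError(
            handle,
            WebString("Provided audioTrack is not supported."),
            WebSpeechRecognizerClient::NotAllowedError);
        return;  // start() fails here; no StartRequest is sent.
      }
      audio_track_.assign(track);
    }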
+
+  // Destroy any previous instance so it does not starve while waiting
+  // on audio chunk ACKs.

  no longer working on chromium (2014/09/23 10:09:13):
    Please add more comment to explain why we stop the […]
  burnik (2014/09/23 12:39:21):
    Done.
+  audio_source_provider_.reset();
+
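Reading this together with OnAudioTrackReady() below: the provider presumably streams audio chunks over the SyncSocket and blocks until the consumer acknowledges each one, so an instance left over from an earlier session would wait forever on ACKs that no longer arrive. Destroying it before issuing a new StartRequest avoids that.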
   SpeechRecognitionHostMsg_StartRequest_Params msg_params;
   for (size_t i = 0; i < params.grammars().size(); ++i) {
     const WebSpeechGrammar& grammar = params.grammars()[i];
@@ -78,6 +97,8 @@ void SpeechRecognitionDispatcher::start(
   msg_params.origin_url = params.origin().toString().utf8();
   msg_params.render_view_id = routing_id();
   msg_params.request_id = GetOrCreateIDForHandle(handle);
+  // Fall back to the default audio input when the track is not allowed.
+  msg_params.using_audio_track = !audio_track_.isNull();
   // The handle mapping will be removed in |OnRecognitionEnd|.
   Send(new SpeechRecognitionHostMsg_StartRequest(msg_params));
 }
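The new using_audio_track flag implies a matching member on the StartRequest params struct. A hedged sketch of what the (not shown) speech_recognition_messages.h side presumably adds, with the existing members elided:

    // Inferred sketch; only the new member is spelled out.
    IPC_STRUCT_BEGIN(SpeechRecognitionHostMsg_StartRequest_Params)
      // ... existing members (grammars, origin_url, render_view_id,
      // request_id, ...) go here ...
      // True when the renderer feeds audio from a MediaStreamTrack
      // instead of the default input device.
      IPC_STRUCT_MEMBER(bool, using_audio_track)
    IPC_STRUCT_END()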
@@ -85,6 +106,7 @@ void SpeechRecognitionDispatcher::start(
 void SpeechRecognitionDispatcher::stop(
     const WebSpeechRecognitionHandle& handle,
     WebSpeechRecognizerClient* recognizer_client) {
+  audio_source_provider_.reset();
   // Ignore a |stop| issued without a matching |start|.
   if (recognizer_client_ != recognizer_client || !HandleExists(handle))
     return;
@@ -95,6 +117,7 @@ void SpeechRecognitionDispatcher::stop(
 void SpeechRecognitionDispatcher::abort(
     const WebSpeechRecognitionHandle& handle,
     WebSpeechRecognizerClient* recognizer_client) {
+  audio_source_provider_.reset();
   // Ignore an |abort| issued without a matching |start|.
   if (recognizer_client_ != recognizer_client || !HandleExists(handle))
     return;
@@ -154,6 +177,7 @@ void SpeechRecognitionDispatcher::OnErrorOccurred(
     recognizer_client_->didReceiveNoMatch(GetHandleFromID(request_id),

  no longer working on chromium (2014/09/23 10:09:13):
    what will happen when getting a didReceiveNoMatch […]
  burnik (2014/09/23 12:39:21):
    This is an API protocol thing as far as I know, no […]

                                           WebSpeechRecognitionResult());
   } else {
+    audio_source_provider_.reset();
     recognizer_client_->didReceiveError(
         GetHandleFromID(request_id),
         WebString(),  // TODO(primiano): message?
@@ -174,6 +198,7 @@ void SpeechRecognitionDispatcher::OnRecognitionEnded(int request_id) {
     // didEnd may call back synchronously to start a new recognition session,
     // and we don't want to delete the handle from the map after that happens.
     handle_map_.erase(request_id);
+    audio_source_provider_.reset();
     recognizer_client_->didEnd(handle);
   }
 }
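Note the ordering: |audio_source_provider_| is reset before didEnd() fires because, as the comment says, didEnd() may synchronously start a new session, and OnAudioTrackReady() below DCHECKs that no provider is alive when it runs.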
@@ -211,6 +236,26 @@ void SpeechRecognitionDispatcher::OnResultsRetrieved(
       GetHandleFromID(request_id), final, provisional);
 }

+void SpeechRecognitionDispatcher::OnAudioTrackReady(
+    int request_id,
+    const media::AudioParameters& params,
+    base::SharedMemoryHandle memory,
+    base::SyncSocket::TransitDescriptor descriptor) {
+  DCHECK(!audio_source_provider_.get());
+  if (audio_track_.isNull()) {
+    audio_source_provider_.reset();
+    return;
+  }
+
+  // Create the socket here and pass its ownership to |audio_source_provider_|.
+  scoped_ptr<base::SyncSocket> socket(
+      new base::SyncSocket(base::SyncSocket::UnwrapHandle(descriptor)));
+
+  audio_source_provider_.reset(new SpeechRecognitionAudioSourceProvider(
+      audio_track_, params, memory, socket.release(),
  no longer working on chromium (2014/09/23 10:09:13):
    you don't need to create a socket at all, just pass […]
  burnik (2014/09/23 12:39:21):
    That would be true if the unit test did not have a […]
+      base::Bind(&SpeechRecognitionDispatcher::OnAudioTrackStopped,
+                 base::Unretained(this))));
+}
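The reviewer's (truncated) objection is that the dispatcher need not materialize the SyncSocket itself; the author's reply indicates the explicit socket exists so the unit test can inject its own. A sketch of the suggested alternative, assuming a hypothetical provider constructor that accepted the transit descriptor directly:

    // Hypothetical constructor overload, for illustration only; the
    // reviewed provider takes a base::SyncSocket*, which is what the
    // test doubles rely on.
    audio_source_provider_.reset(new SpeechRecognitionAudioSourceProvider(
        audio_track_, params, memory, descriptor,
        base::Bind(&SpeechRecognitionDispatcher::OnAudioTrackStopped,
                   base::Unretained(this))));

Either way the socket ends up owned by the provider; in the reviewed version that transfer happens through socket.release().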
 int SpeechRecognitionDispatcher::GetOrCreateIDForHandle(
     const WebSpeechRecognitionHandle& handle) {
@@ -239,6 +284,10 @@ bool SpeechRecognitionDispatcher::HandleExists(
   return false;
 }

+void SpeechRecognitionDispatcher::OnAudioTrackStopped() {
+  audio_track_.reset();
+}
+
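OnAudioTrackStopped() drops only the track, not the provider. Since the provider that invokes this callback is itself owned by the dispatcher through a scoped_ptr, the base::Unretained(this) binding in OnAudioTrackReady() presumably cannot outlive the dispatcher, as long as the provider never fires the callback after being reset.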
 const WebSpeechRecognitionHandle& SpeechRecognitionDispatcher::GetHandleFromID(
     int request_id) {
   HandleMap::iterator iter = handle_map_.find(request_id);