| OLD | NEW |
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "content/browser/speech/speech_recognition_manager_impl.h" | 5 #include "content/browser/speech/speech_recognition_manager_impl.h" |
| 6 | 6 |
| 7 #include "base/bind.h" | 7 #include "base/bind.h" |
| 8 #include "content/browser/browser_main_loop.h" | 8 #include "content/browser/browser_main_loop.h" |
| 9 #include "content/browser/renderer_host/media/media_stream_manager.h" | 9 #include "content/browser/renderer_host/media/media_stream_manager.h" |
| 10 #include "content/browser/renderer_host/media/media_stream_ui_proxy.h" |
| 10 #include "content/browser/speech/google_one_shot_remote_engine.h" | 11 #include "content/browser/speech/google_one_shot_remote_engine.h" |
| 11 #include "content/browser/speech/google_streaming_remote_engine.h" | 12 #include "content/browser/speech/google_streaming_remote_engine.h" |
| 12 #include "content/browser/speech/speech_recognition_engine.h" | 13 #include "content/browser/speech/speech_recognition_engine.h" |
| 13 #include "content/browser/speech/speech_recognizer.h" | 14 #include "content/browser/speech/speech_recognizer.h" |
| 14 #include "content/public/browser/browser_thread.h" | 15 #include "content/public/browser/browser_thread.h" |
| 15 #include "content/public/browser/content_browser_client.h" | 16 #include "content/public/browser/content_browser_client.h" |
| 16 #include "content/public/browser/resource_context.h" | 17 #include "content/public/browser/resource_context.h" |
| 17 #include "content/public/browser/speech_recognition_event_listener.h" | 18 #include "content/public/browser/speech_recognition_event_listener.h" |
| 18 #include "content/public/browser/speech_recognition_manager_delegate.h" | 19 #include "content/public/browser/speech_recognition_manager_delegate.h" |
| 19 #include "content/public/browser/speech_recognition_session_config.h" | 20 #include "content/public/browser/speech_recognition_session_config.h" |
| (...skipping 41 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 61 delegate_(GetContentClient()->browser()-> | 62 delegate_(GetContentClient()->browser()-> |
| 62 GetSpeechRecognitionManagerDelegate()), | 63 GetSpeechRecognitionManagerDelegate()), |
| 63 ALLOW_THIS_IN_INITIALIZER_LIST(weak_factory_(this)) { | 64 ALLOW_THIS_IN_INITIALIZER_LIST(weak_factory_(this)) { |
| 64 DCHECK(!g_speech_recognition_manager_impl); | 65 DCHECK(!g_speech_recognition_manager_impl); |
| 65 g_speech_recognition_manager_impl = this; | 66 g_speech_recognition_manager_impl = this; |
| 66 } | 67 } |
| 67 | 68 |
| 68 SpeechRecognitionManagerImpl::~SpeechRecognitionManagerImpl() { | 69 SpeechRecognitionManagerImpl::~SpeechRecognitionManagerImpl() { |
| 69 DCHECK(g_speech_recognition_manager_impl); | 70 DCHECK(g_speech_recognition_manager_impl); |
| 70 g_speech_recognition_manager_impl = NULL; | 71 g_speech_recognition_manager_impl = NULL; |
| 71 // Recognition sessions will be aborted by the corresponding destructors. | 72 |
| 73 for (SessionsTable::iterator it = sessions_.begin(); it != sessions_.end(); |
| 74 ++it) { |
| 75 // MediaStreamUIProxy must be deleted on the IO thread. |
| 76 BrowserThread::DeleteSoon(BrowserThread::IO, FROM_HERE, |
| 77 it->second->ui.release()); |
| 78 delete it->second; |
| 79 } |
| 72 sessions_.clear(); | 80 sessions_.clear(); |
| 73 } | 81 } |
| 74 | 82 |
| 75 int SpeechRecognitionManagerImpl::CreateSession( | 83 int SpeechRecognitionManagerImpl::CreateSession( |
| 76 const SpeechRecognitionSessionConfig& config) { | 84 const SpeechRecognitionSessionConfig& config) { |
| 77 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | 85 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); |
| 78 | 86 |
| 79 const int session_id = GetNextSessionID(); | 87 const int session_id = GetNextSessionID(); |
| 80 DCHECK(!SessionExists(session_id)); | 88 DCHECK(!SessionExists(session_id)); |
| 81 // Set-up the new session. | 89 // Set-up the new session. |
| 82 Session& session = sessions_[session_id]; | 90 Session* session = new Session(); |
| 83 session.id = session_id; | 91 sessions_[session_id] = session; |
| 84 session.config = config; | 92 session->id = session_id; |
| 85 session.context = config.initial_context; | 93 session->config = config; |
| 94 session->context = config.initial_context; |
| 86 | 95 |
| 87 std::string hardware_info; | 96 std::string hardware_info; |
| 88 bool can_report_metrics = false; | 97 bool can_report_metrics = false; |
| 89 if (delegate_.get()) | 98 if (delegate_.get()) |
| 90 delegate_->GetDiagnosticInformation(&can_report_metrics, &hardware_info); | 99 delegate_->GetDiagnosticInformation(&can_report_metrics, &hardware_info); |
| 91 | 100 |
| 92 SpeechRecognitionEngineConfig remote_engine_config; | 101 SpeechRecognitionEngineConfig remote_engine_config; |
| 93 remote_engine_config.language = config.language; | 102 remote_engine_config.language = config.language; |
| 94 remote_engine_config.grammars = config.grammars; | 103 remote_engine_config.grammars = config.grammars; |
| 95 remote_engine_config.audio_sample_rate = SpeechRecognizer::kAudioSampleRate; | 104 remote_engine_config.audio_sample_rate = SpeechRecognizer::kAudioSampleRate; |
| (...skipping 14 matching lines...) Expand all Loading... |
| 110 } else { | 119 } else { |
| 111 google_remote_engine = | 120 google_remote_engine = |
| 112 new GoogleStreamingRemoteEngine(config.url_request_context_getter); | 121 new GoogleStreamingRemoteEngine(config.url_request_context_getter); |
| 113 } | 122 } |
| 114 | 123 |
| 115 google_remote_engine->SetConfig(remote_engine_config); | 124 google_remote_engine->SetConfig(remote_engine_config); |
| 116 | 125 |
| 117 // The legacy api cannot use continuous mode. | 126 // The legacy api cannot use continuous mode. |
| 118 DCHECK(!config.is_legacy_api || !config.continuous); | 127 DCHECK(!config.is_legacy_api || !config.continuous); |
| 119 | 128 |
| 120 session.recognizer = new SpeechRecognizer( | 129 session->recognizer = new SpeechRecognizer( |
| 121 this, | 130 this, |
| 122 session_id, | 131 session_id, |
| 123 !config.continuous, | 132 !config.continuous, |
| 124 google_remote_engine); | 133 google_remote_engine); |
| 125 return session_id; | 134 return session_id; |
| 126 } | 135 } |
| 127 | 136 |
| 128 void SpeechRecognitionManagerImpl::StartSession(int session_id) { | 137 void SpeechRecognitionManagerImpl::StartSession(int session_id) { |
| 129 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | 138 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); |
| 130 if (!SessionExists(session_id)) | 139 if (!SessionExists(session_id)) |
| (...skipping 24 matching lines...) Expand all Loading... |
| 155 return; | 164 return; |
| 156 | 165 |
| 157 #if defined(OS_IOS) | 166 #if defined(OS_IOS) |
| 158 // On iOS, voice search can only be initiated by clear user action and thus | 167 // On iOS, voice search can only be initiated by clear user action and thus |
| 159 // it is always allowed. | 168 // it is always allowed. |
| 160 DCHECK(!ask_user && is_allowed); | 169 DCHECK(!ask_user && is_allowed); |
| 161 #else | 170 #else |
| 162 if (ask_user) { | 171 if (ask_user) { |
| 163 SessionsTable::iterator iter = sessions_.find(session_id); | 172 SessionsTable::iterator iter = sessions_.find(session_id); |
| 164 DCHECK(iter != sessions_.end()); | 173 DCHECK(iter != sessions_.end()); |
| 165 SpeechRecognitionSessionContext& context = iter->second.context; | 174 SpeechRecognitionSessionContext& context = iter->second->context; |
| 166 context.label = | 175 context.label = |
| 167 BrowserMainLoop::GetMediaStreamManager()->MakeMediaAccessRequest( | 176 BrowserMainLoop::GetMediaStreamManager()->MakeMediaAccessRequest( |
| 168 context.render_process_id, | 177 context.render_process_id, |
| 169 context.render_view_id, | 178 context.render_view_id, |
| 170 StreamOptions(MEDIA_DEVICE_AUDIO_CAPTURE, MEDIA_NO_SERVICE), | 179 StreamOptions(MEDIA_DEVICE_AUDIO_CAPTURE, MEDIA_NO_SERVICE), |
| 171 GURL(context.context_name), | 180 GURL(context.context_name), |
| 172 base::Bind( | 181 base::Bind( |
| 173 &SpeechRecognitionManagerImpl::MediaRequestPermissionCallback, | 182 &SpeechRecognitionManagerImpl::MediaRequestPermissionCallback, |
| 174 weak_factory_.GetWeakPtr())); | 183 weak_factory_.GetWeakPtr(), session_id)); |
| 175 | |
| 176 return; | 184 return; |
| 177 } | 185 } |
| 178 #endif // defined(OS_IOS) | 186 #endif // defined(OS_IOS) |
| 179 | 187 |
| 180 if (is_allowed) { | 188 if (is_allowed) { |
| 181 MessageLoop::current()->PostTask(FROM_HERE, | 189 MessageLoop::current()->PostTask(FROM_HERE, |
| 182 base::Bind(&SpeechRecognitionManagerImpl::DispatchEvent, | 190 base::Bind(&SpeechRecognitionManagerImpl::DispatchEvent, |
| 183 weak_factory_.GetWeakPtr(), session_id, EVENT_START)); | 191 weak_factory_.GetWeakPtr(), session_id, EVENT_START)); |
| 184 } else { | 192 } else { |
| 185 OnRecognitionError(session_id, SpeechRecognitionError( | 193 OnRecognitionError(session_id, SpeechRecognitionError( |
| 186 SPEECH_RECOGNITION_ERROR_NOT_ALLOWED)); | 194 SPEECH_RECOGNITION_ERROR_NOT_ALLOWED)); |
| 187 MessageLoop::current()->PostTask(FROM_HERE, | 195 MessageLoop::current()->PostTask(FROM_HERE, |
| 188 base::Bind(&SpeechRecognitionManagerImpl::DispatchEvent, | 196 base::Bind(&SpeechRecognitionManagerImpl::DispatchEvent, |
| 189 weak_factory_.GetWeakPtr(), session_id, EVENT_ABORT)); | 197 weak_factory_.GetWeakPtr(), session_id, EVENT_ABORT)); |
| 190 } | 198 } |
| 191 } | 199 } |
| 192 | 200 |
| 193 void SpeechRecognitionManagerImpl::MediaRequestPermissionCallback( | 201 void SpeechRecognitionManagerImpl::MediaRequestPermissionCallback( |
| 194 const std::string& label, const MediaStreamDevices& devices) { | 202 int session_id, |
| 203 const MediaStreamDevices& devices, |
| 204 scoped_ptr<MediaStreamUIProxy> stream_ui) { |
| 195 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | 205 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); |
| 196 for (SessionsTable::iterator iter = sessions_.begin(); | |
| 197 iter != sessions_.end(); ++iter) { | |
| 198 if (iter->second.context.label == label) { | |
| 199 bool is_allowed = false; | |
| 200 if (!devices.empty()) { | |
| 201 // Copy the approved devices array to the context for UI indication. | |
| 202 iter->second.context.devices = devices; | |
| 203 is_allowed = true; | |
| 204 } | |
| 205 | 206 |
| 206 // Clear the label to indicate the request has been done. | 207 SessionsTable::iterator iter = sessions_.find(session_id); |
| 207 iter->second.context.label.clear(); | 208 if (iter == sessions_.end()) |
| 209 return; |
| 208 | 210 |
| 209 // Notify the recognition about the request result. | 211 bool is_allowed = !devices.empty(); |
| 210 RecognitionAllowedCallback(iter->first, false, is_allowed); | 212 if (is_allowed) { |
| 211 break; | 213 // Copy the approved devices array to the context for UI indication. |
| 212 } | 214 iter->second->context.devices = devices; |
| 213 } | 215 } |
| 216 |
| 217 // Clear the label to indicate the request has been done. |
| 218 iter->second->context.label.clear(); |
| 219 |
| 220 // Notify the recognition about the request result. |
| 221 RecognitionAllowedCallback(iter->first, false, is_allowed); |
| 214 } | 222 } |
| 215 | 223 |
| 216 void SpeechRecognitionManagerImpl::AbortSession(int session_id) { | 224 void SpeechRecognitionManagerImpl::AbortSession(int session_id) { |
| 217 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | 225 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); |
| 218 if (!SessionExists(session_id)) | 226 if (!SessionExists(session_id)) |
| 219 return; | 227 return; |
| 220 | 228 |
| 221 #if !defined(OS_IOS) | 229 SessionsTable::iterator iter = sessions_.find(session_id); |
| 222 const SpeechRecognitionSessionContext& context = | 230 iter->second->ui.reset(); |
| 223 GetSessionContext(session_id); | |
| 224 if (!context.label.empty()) | |
| 225 BrowserMainLoop::GetMediaStreamManager()->CancelRequest(context.label); | |
| 226 #endif // !defined(OS_IOS) | |
| 227 | 231 |
| 228 MessageLoop::current()->PostTask(FROM_HERE, | 232 MessageLoop::current()->PostTask(FROM_HERE, |
| 229 base::Bind(&SpeechRecognitionManagerImpl::DispatchEvent, | 233 base::Bind(&SpeechRecognitionManagerImpl::DispatchEvent, |
| 230 weak_factory_.GetWeakPtr(), session_id, EVENT_ABORT)); | 234 weak_factory_.GetWeakPtr(), session_id, EVENT_ABORT)); |
| 231 } | 235 } |
| 232 | 236 |
| 233 void SpeechRecognitionManagerImpl::StopAudioCaptureForSession(int session_id) { | 237 void SpeechRecognitionManagerImpl::StopAudioCaptureForSession(int session_id) { |
| 234 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | 238 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); |
| 235 if (!SessionExists(session_id)) | 239 if (!SessionExists(session_id)) |
| 236 return; | 240 return; |
| 237 | 241 |
| 238 #if !defined(OS_IOS) | 242 SessionsTable::iterator iter = sessions_.find(session_id); |
| 239 const SpeechRecognitionSessionContext& context = | 243 iter->second->ui.reset(); |
| 240 GetSessionContext(session_id); | |
| 241 if (!context.label.empty()) | |
| 242 BrowserMainLoop::GetMediaStreamManager()->CancelRequest(context.label); | |
| 243 #endif // !defined(OS_IOS) | |
| 244 | 244 |
| 245 MessageLoop::current()->PostTask(FROM_HERE, | 245 MessageLoop::current()->PostTask(FROM_HERE, |
| 246 base::Bind(&SpeechRecognitionManagerImpl::DispatchEvent, | 246 base::Bind(&SpeechRecognitionManagerImpl::DispatchEvent, |
| 247 weak_factory_.GetWeakPtr(), session_id, EVENT_STOP_CAPTURE)); | 247 weak_factory_.GetWeakPtr(), session_id, EVENT_STOP_CAPTURE)); |
| 248 } | 248 } |
| 249 | 249 |
| 250 // Here begins the SpeechRecognitionEventListener interface implementation, | 250 // Here begins the SpeechRecognitionEventListener interface implementation, |
| 251 // which will simply relay the events to the proper listener registered for the | 251 // which will simply relay the events to the proper listener registered for the |
| 252 // particular session (most likely InputTagSpeechDispatcherHost) and to the | 252 // particular session (most likely InputTagSpeechDispatcherHost) and to the |
| 253 // catch-all listener provided by the delegate (if any). | 253 // catch-all listener provided by the delegate (if any). |
| 254 | 254 |
| 255 void SpeechRecognitionManagerImpl::OnRecognitionStart(int session_id) { | 255 void SpeechRecognitionManagerImpl::OnRecognitionStart(int session_id) { |
| 256 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | 256 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); |
| 257 if (!SessionExists(session_id)) | 257 if (!SessionExists(session_id)) |
| 258 return; | 258 return; |
| 259 | 259 |
| 260 #if !defined(OS_IOS) | 260 SessionsTable::iterator iter = sessions_.find(session_id); |
| 261 const SpeechRecognitionSessionContext& context = | 261 if (iter->second->ui) { |
| 262 GetSessionContext(session_id); | 262 // Notify the UI that the devices are being used. |
| 263 if (!context.devices.empty()) { | 263 iter->second->ui->OnStarted(base::Closure()); |
| 264 // Notify the UI the devices are being used. | |
| 265 BrowserMainLoop::GetMediaStreamManager()->NotifyUIDevicesOpened( | |
| 266 context.label); | |
| 267 } | 264 } |
| 268 #endif // !defined(OS_IOS) | |
| 269 | 265 |
| 270 DCHECK_EQ(primary_session_id_, session_id); | 266 DCHECK_EQ(primary_session_id_, session_id); |
| 271 if (SpeechRecognitionEventListener* delegate_listener = GetDelegateListener()) | 267 if (SpeechRecognitionEventListener* delegate_listener = GetDelegateListener()) |
| 272 delegate_listener->OnRecognitionStart(session_id); | 268 delegate_listener->OnRecognitionStart(session_id); |
| 273 if (SpeechRecognitionEventListener* listener = GetListener(session_id)) | 269 if (SpeechRecognitionEventListener* listener = GetListener(session_id)) |
| 274 listener->OnRecognitionStart(session_id); | 270 listener->OnRecognitionStart(session_id); |
| 275 } | 271 } |
| 276 | 272 |
| 277 void SpeechRecognitionManagerImpl::OnAudioStart(int session_id) { | 273 void SpeechRecognitionManagerImpl::OnAudioStart(int session_id) { |
| 278 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | 274 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); |
| (...skipping 90 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 369 if (SpeechRecognitionEventListener* delegate_listener = GetDelegateListener()) | 365 if (SpeechRecognitionEventListener* delegate_listener = GetDelegateListener()) |
| 370 delegate_listener->OnAudioLevelsChange(session_id, volume, noise_volume); | 366 delegate_listener->OnAudioLevelsChange(session_id, volume, noise_volume); |
| 371 if (SpeechRecognitionEventListener* listener = GetListener(session_id)) | 367 if (SpeechRecognitionEventListener* listener = GetListener(session_id)) |
| 372 listener->OnAudioLevelsChange(session_id, volume, noise_volume); | 368 listener->OnAudioLevelsChange(session_id, volume, noise_volume); |
| 373 } | 369 } |
| 374 | 370 |
| 375 void SpeechRecognitionManagerImpl::OnRecognitionEnd(int session_id) { | 371 void SpeechRecognitionManagerImpl::OnRecognitionEnd(int session_id) { |
| 376 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | 372 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); |
| 377 if (!SessionExists(session_id)) | 373 if (!SessionExists(session_id)) |
| 378 return; | 374 return; |
| 379 #if !defined(OS_IOS) | |
| 380 const SpeechRecognitionSessionContext& context = | |
| 381 GetSessionContext(session_id); | |
| 382 if (!context.devices.empty()) { | |
| 383 // Notify the UI the devices has been closed. | |
| 384 BrowserMainLoop::GetMediaStreamManager()->NotifyUIDevicesClosed( | |
| 385 context.label); | |
| 386 } | |
| 387 #endif // !defined(OS_IOS) | |
| 388 | 375 |
| 389 if (SpeechRecognitionEventListener* delegate_listener = GetDelegateListener()) | 376 if (SpeechRecognitionEventListener* delegate_listener = GetDelegateListener()) |
| 390 delegate_listener->OnRecognitionEnd(session_id); | 377 delegate_listener->OnRecognitionEnd(session_id); |
| 391 if (SpeechRecognitionEventListener* listener = GetListener(session_id)) | 378 if (SpeechRecognitionEventListener* listener = GetListener(session_id)) |
| 392 listener->OnRecognitionEnd(session_id); | 379 listener->OnRecognitionEnd(session_id); |
| 393 MessageLoop::current()->PostTask(FROM_HERE, | 380 MessageLoop::current()->PostTask(FROM_HERE, |
| 394 base::Bind(&SpeechRecognitionManagerImpl::DispatchEvent, | 381 base::Bind(&SpeechRecognitionManagerImpl::DispatchEvent, |
| 395 weak_factory_.GetWeakPtr(), | 382 weak_factory_.GetWeakPtr(), |
| 396 session_id, | 383 session_id, |
| 397 EVENT_RECOGNITION_ENDED)); | 384 EVENT_RECOGNITION_ENDED)); |
| 398 } | 385 } |
| 399 | 386 |
| 400 int SpeechRecognitionManagerImpl::GetSession( | 387 int SpeechRecognitionManagerImpl::GetSession( |
| 401 int render_process_id, int render_view_id, int request_id) const { | 388 int render_process_id, int render_view_id, int request_id) const { |
| 402 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | 389 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); |
| 403 SessionsTable::const_iterator iter; | 390 SessionsTable::const_iterator iter; |
| 404 for(iter = sessions_.begin(); iter != sessions_.end(); ++iter) { | 391 for(iter = sessions_.begin(); iter != sessions_.end(); ++iter) { |
| 405 const int session_id = iter->first; | 392 const int session_id = iter->first; |
| 406 const SpeechRecognitionSessionContext& context = iter->second.context; | 393 const SpeechRecognitionSessionContext& context = iter->second->context; |
| 407 if (context.render_process_id == render_process_id && | 394 if (context.render_process_id == render_process_id && |
| 408 context.render_view_id == render_view_id && | 395 context.render_view_id == render_view_id && |
| 409 context.request_id == request_id) { | 396 context.request_id == request_id) { |
| 410 return session_id; | 397 return session_id; |
| 411 } | 398 } |
| 412 } | 399 } |
| 413 return kSessionIDInvalid; | 400 return kSessionIDInvalid; |
| 414 } | 401 } |
| 415 | 402 |
| 416 SpeechRecognitionSessionContext | 403 SpeechRecognitionSessionContext |
| 417 SpeechRecognitionManagerImpl::GetSessionContext(int session_id) const { | 404 SpeechRecognitionManagerImpl::GetSessionContext(int session_id) const { |
| 418 return GetSession(session_id).context; | 405 return GetSession(session_id)->context; |
| 419 } | 406 } |
| 420 | 407 |
| 421 void SpeechRecognitionManagerImpl::AbortAllSessionsForListener( | 408 void SpeechRecognitionManagerImpl::AbortAllSessionsForListener( |
| 422 SpeechRecognitionEventListener* listener) { | 409 SpeechRecognitionEventListener* listener) { |
| 423 // This method gracefully destroys sessions for the listener. However, since | 410 // This method gracefully destroys sessions for the listener. However, since |
| 424 // the listener itself is likely to be destroyed after this call, we avoid | 411 // the listener itself is likely to be destroyed after this call, we avoid |
| 425 // dispatching further events to it, marking the |listener_is_active| flag. | 412 // dispatching further events to it, marking the |listener_is_active| flag. |
| 426 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | 413 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); |
| 427 for (SessionsTable::iterator it = sessions_.begin(); it != sessions_.end(); | 414 for (SessionsTable::iterator it = sessions_.begin(); it != sessions_.end(); |
| 428 ++it) { | 415 ++it) { |
| 429 Session& session = it->second; | 416 Session* session = it->second; |
| 430 if (session.config.event_listener == listener) { | 417 if (session->config.event_listener == listener) { |
| 431 AbortSession(session.id); | 418 AbortSession(session->id); |
| 432 session.listener_is_active = false; | 419 session->listener_is_active = false; |
| 433 } | 420 } |
| 434 } | 421 } |
| 435 } | 422 } |
| 436 | 423 |
| 437 void SpeechRecognitionManagerImpl::AbortAllSessionsForRenderView( | 424 void SpeechRecognitionManagerImpl::AbortAllSessionsForRenderView( |
| 438 int render_process_id, | 425 int render_process_id, |
| 439 int render_view_id) { | 426 int render_view_id) { |
| 440 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | 427 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); |
| 441 for (SessionsTable::iterator it = sessions_.begin(); it != sessions_.end(); | 428 for (SessionsTable::iterator it = sessions_.begin(); it != sessions_.end(); |
| 442 ++it) { | 429 ++it) { |
| 443 Session& session = it->second; | 430 Session* session = it->second; |
| 444 if (session.context.render_process_id == render_process_id && | 431 if (session->context.render_process_id == render_process_id && |
| 445 session.context.render_view_id == render_view_id) { | 432 session->context.render_view_id == render_view_id) { |
| 446 AbortSession(session.id); | 433 AbortSession(session->id); |
| 447 } | 434 } |
| 448 } | 435 } |
| 449 } | 436 } |
| 450 | 437 |
| 451 // ----------------------- Core FSM implementation --------------------------- | 438 // ----------------------- Core FSM implementation --------------------------- |
| 452 void SpeechRecognitionManagerImpl::DispatchEvent(int session_id, | 439 void SpeechRecognitionManagerImpl::DispatchEvent(int session_id, |
| 453 FSMEvent event) { | 440 FSMEvent event) { |
| 454 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | 441 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); |
| 455 | 442 |
| 456 // There are some corner cases in which the session might be deleted (due to | 443 // There are some corner cases in which the session might be deleted (due to |
| 457 // an EndRecognition event) between a request (e.g. Abort) and its dispatch. | 444 // an EndRecognition event) between a request (e.g. Abort) and its dispatch. |
| 458 if (!SessionExists(session_id)) | 445 if (!SessionExists(session_id)) |
| 459 return; | 446 return; |
| 460 | 447 |
| 461 const Session& session = GetSession(session_id); | 448 Session* session = GetSession(session_id); |
| 462 FSMState session_state = GetSessionState(session_id); | 449 FSMState session_state = GetSessionState(session_id); |
| 463 DCHECK_LE(session_state, SESSION_STATE_MAX_VALUE); | 450 DCHECK_LE(session_state, SESSION_STATE_MAX_VALUE); |
| 464 DCHECK_LE(event, EVENT_MAX_VALUE); | 451 DCHECK_LE(event, EVENT_MAX_VALUE); |
| 465 | 452 |
| 466 // Event dispatching must be sequential, otherwise it will break all the rules | 453 // Event dispatching must be sequential, otherwise it will break all the rules |
| 467 // and the assumptions of the finite state automata model. | 454 // and the assumptions of the finite state automata model. |
| 468 DCHECK(!is_dispatching_event_); | 455 DCHECK(!is_dispatching_event_); |
| 469 is_dispatching_event_ = true; | 456 is_dispatching_event_ = true; |
| 470 ExecuteTransitionAndGetNextState(session, session_state, event); | 457 ExecuteTransitionAndGetNextState(session, session_state, event); |
| 471 is_dispatching_event_ = false; | 458 is_dispatching_event_ = false; |
| 472 } | 459 } |
| 473 | 460 |
| 474 // This FSM handles the evolution of each session, from the viewpoint of the | 461 // This FSM handles the evolution of each session, from the viewpoint of the |
| 475 // interaction with the user (that may be either the browser end-user which | 462 // interaction with the user (that may be either the browser end-user which |
| 476 // interacts with UI bubbles, or JS developer intracting with JS methods). | 463 // interacts with UI bubbles, or JS developer intracting with JS methods). |
| 477 // All the events received by the SpeechRecognizer instances (one for each | 464 // All the events received by the SpeechRecognizer instances (one for each |
| 478 // session) are always routed to the SpeechRecognitionEventListener(s) | 465 // session) are always routed to the SpeechRecognitionEventListener(s) |
| 479 // regardless the choices taken in this FSM. | 466 // regardless the choices taken in this FSM. |
| 480 void SpeechRecognitionManagerImpl::ExecuteTransitionAndGetNextState( | 467 void SpeechRecognitionManagerImpl::ExecuteTransitionAndGetNextState( |
| 481 const Session& session, FSMState session_state, FSMEvent event) { | 468 Session* session, FSMState session_state, FSMEvent event) { |
| 482 // Note: since we're not tracking the state of the recognizer object, rather | 469 // Note: since we're not tracking the state of the recognizer object, rather |
| 483 // we're directly retrieving it (through GetSessionState), we see its events | 470 // we're directly retrieving it (through GetSessionState), we see its events |
| 484 // (that are AUDIO_ENDED and RECOGNITION_ENDED) after its state evolution | 471 // (that are AUDIO_ENDED and RECOGNITION_ENDED) after its state evolution |
| 485 // (e.g., when we receive the AUDIO_ENDED event, the recognizer has just | 472 // (e.g., when we receive the AUDIO_ENDED event, the recognizer has just |
| 486 // completed the transition from CAPTURING_AUDIO to WAITING_FOR_RESULT, thus | 473 // completed the transition from CAPTURING_AUDIO to WAITING_FOR_RESULT, thus |
| 487 // we perceive the AUDIO_ENDED event in WAITING_FOR_RESULT). | 474 // we perceive the AUDIO_ENDED event in WAITING_FOR_RESULT). |
| 488 // This makes the code below a bit tricky but avoids a lot of code for | 475 // This makes the code below a bit tricky but avoids a lot of code for |
| 489 // tracking and reconstructing asynchronously the state of the recognizer. | 476 // tracking and reconstructing asynchronously the state of the recognizer. |
| 490 switch (session_state) { | 477 switch (session_state) { |
| 491 case SESSION_STATE_IDLE: | 478 case SESSION_STATE_IDLE: |
| 492 switch (event) { | 479 switch (event) { |
| 493 case EVENT_START: | 480 case EVENT_START: |
| 494 return SessionStart(session); | 481 return SessionStart(*session); |
| 495 case EVENT_ABORT: | 482 case EVENT_ABORT: |
| 496 return SessionAbort(session); | 483 return SessionAbort(*session); |
| 497 case EVENT_RECOGNITION_ENDED: | 484 case EVENT_RECOGNITION_ENDED: |
| 498 return SessionDelete(session); | 485 return SessionDelete(session); |
| 499 case EVENT_STOP_CAPTURE: | 486 case EVENT_STOP_CAPTURE: |
| 500 return SessionStopAudioCapture(session); | 487 return SessionStopAudioCapture(*session); |
| 501 case EVENT_AUDIO_ENDED: | 488 case EVENT_AUDIO_ENDED: |
| 502 return; | 489 return; |
| 503 } | 490 } |
| 504 break; | 491 break; |
| 505 case SESSION_STATE_CAPTURING_AUDIO: | 492 case SESSION_STATE_CAPTURING_AUDIO: |
| 506 switch (event) { | 493 switch (event) { |
| 507 case EVENT_STOP_CAPTURE: | 494 case EVENT_STOP_CAPTURE: |
| 508 return SessionStopAudioCapture(session); | 495 return SessionStopAudioCapture(*session); |
| 509 case EVENT_ABORT: | 496 case EVENT_ABORT: |
| 510 return SessionAbort(session); | 497 return SessionAbort(*session); |
| 511 case EVENT_START: | 498 case EVENT_START: |
| 512 return; | 499 return; |
| 513 case EVENT_AUDIO_ENDED: | 500 case EVENT_AUDIO_ENDED: |
| 514 case EVENT_RECOGNITION_ENDED: | 501 case EVENT_RECOGNITION_ENDED: |
| 515 return NotFeasible(session, event); | 502 return NotFeasible(*session, event); |
| 516 } | 503 } |
| 517 break; | 504 break; |
| 518 case SESSION_STATE_WAITING_FOR_RESULT: | 505 case SESSION_STATE_WAITING_FOR_RESULT: |
| 519 switch (event) { | 506 switch (event) { |
| 520 case EVENT_ABORT: | 507 case EVENT_ABORT: |
| 521 return SessionAbort(session); | 508 return SessionAbort(*session); |
| 522 case EVENT_AUDIO_ENDED: | 509 case EVENT_AUDIO_ENDED: |
| 523 return ResetCapturingSessionId(session); | 510 return ResetCapturingSessionId(*session); |
| 524 case EVENT_START: | 511 case EVENT_START: |
| 525 case EVENT_STOP_CAPTURE: | 512 case EVENT_STOP_CAPTURE: |
| 526 return; | 513 return; |
| 527 case EVENT_RECOGNITION_ENDED: | 514 case EVENT_RECOGNITION_ENDED: |
| 528 return NotFeasible(session, event); | 515 return NotFeasible(*session, event); |
| 529 } | 516 } |
| 530 break; | 517 break; |
| 531 } | 518 } |
| 532 return NotFeasible(session, event); | 519 return NotFeasible(*session, event); |
| 533 } | 520 } |
| 534 | 521 |
| 535 SpeechRecognitionManagerImpl::FSMState | 522 SpeechRecognitionManagerImpl::FSMState |
| 536 SpeechRecognitionManagerImpl::GetSessionState(int session_id) const { | 523 SpeechRecognitionManagerImpl::GetSessionState(int session_id) const { |
| 537 const Session& session = GetSession(session_id); | 524 Session* session = GetSession(session_id); |
| 538 if (!session.recognizer.get() || !session.recognizer->IsActive()) | 525 if (!session->recognizer.get() || !session->recognizer->IsActive()) |
| 539 return SESSION_STATE_IDLE; | 526 return SESSION_STATE_IDLE; |
| 540 if (session.recognizer->IsCapturingAudio()) | 527 if (session->recognizer->IsCapturingAudio()) |
| 541 return SESSION_STATE_CAPTURING_AUDIO; | 528 return SESSION_STATE_CAPTURING_AUDIO; |
| 542 return SESSION_STATE_WAITING_FOR_RESULT; | 529 return SESSION_STATE_WAITING_FOR_RESULT; |
| 543 } | 530 } |
| 544 | 531 |
| 545 // ----------- Contract for all the FSM evolution functions below ------------- | 532 // ----------- Contract for all the FSM evolution functions below ------------- |
| 546 // - Are guaranteed to be executed in the IO thread; | 533 // - Are guaranteed to be executed in the IO thread; |
| 547 // - Are guaranteed to be not reentrant (themselves and each other); | 534 // - Are guaranteed to be not reentrant (themselves and each other); |
| 548 | 535 |
| 549 void SpeechRecognitionManagerImpl::SessionStart(const Session& session) { | 536 void SpeechRecognitionManagerImpl::SessionStart(const Session& session) { |
| 550 DCHECK_EQ(primary_session_id_, session.id); | 537 DCHECK_EQ(primary_session_id_, session.id); |
| (...skipping 12 matching lines...) Expand all Loading... |
| 563 DCHECK(session.recognizer.get()); | 550 DCHECK(session.recognizer.get()); |
| 564 session.recognizer->StopAudioCapture(); | 551 session.recognizer->StopAudioCapture(); |
| 565 } | 552 } |
| 566 | 553 |
// Releases the "primary session" marker (the session owning audio capture).
// Only the session currently holding it may reset it, as the DCHECK enforces.
void SpeechRecognitionManagerImpl::ResetCapturingSessionId(
    const Session& session) {
  DCHECK_EQ(primary_session_id_, session.id);
  primary_session_id_ = kSessionIDInvalid;
}
| 572 | 559 |
| 573 void SpeechRecognitionManagerImpl::SessionDelete(const Session& session) { | 560 void SpeechRecognitionManagerImpl::SessionDelete(Session* session) { |
| 574 DCHECK(session.recognizer == NULL || !session.recognizer->IsActive()); | 561 DCHECK(session->recognizer == NULL || !session->recognizer->IsActive()); |
| 575 if (primary_session_id_ == session.id) | 562 if (primary_session_id_ == session->id) |
| 576 primary_session_id_ = kSessionIDInvalid; | 563 primary_session_id_ = kSessionIDInvalid; |
| 577 sessions_.erase(session.id); | 564 sessions_.erase(session->id); |
| 565 delete session; |
| 578 } | 566 } |
| 579 | 567 |
// Invoked when |event| cannot legally occur in |session|'s current FSM
// state; signals a logic error via NOTREACHED() with diagnostic context.
void SpeechRecognitionManagerImpl::NotFeasible(const Session& session,
                                               FSMEvent event) {
  NOTREACHED() << "Unfeasible event " << event
               << " in state " << GetSessionState(session.id)
               << " for session " << session.id;
}
| 586 | 574 |
| 587 int SpeechRecognitionManagerImpl::GetNextSessionID() { | 575 int SpeechRecognitionManagerImpl::GetNextSessionID() { |
| 588 ++last_session_id_; | 576 ++last_session_id_; |
| 589 // Deal with wrapping of last_session_id_. (How civilized). | 577 // Deal with wrapping of last_session_id_. (How civilized). |
| 590 if (last_session_id_ <= 0) | 578 if (last_session_id_ <= 0) |
| 591 last_session_id_ = 1; | 579 last_session_id_ = 1; |
| 592 return last_session_id_; | 580 return last_session_id_; |
| 593 } | 581 } |
| 594 | 582 |
| 595 bool SpeechRecognitionManagerImpl::SessionExists(int session_id) const { | 583 bool SpeechRecognitionManagerImpl::SessionExists(int session_id) const { |
| 596 return sessions_.find(session_id) != sessions_.end(); | 584 return sessions_.find(session_id) != sessions_.end(); |
| 597 } | 585 } |
| 598 | 586 |
| 599 const SpeechRecognitionManagerImpl::Session& | 587 SpeechRecognitionManagerImpl::Session* |
| 600 SpeechRecognitionManagerImpl::GetSession(int session_id) const { | 588 SpeechRecognitionManagerImpl::GetSession(int session_id) const { |
| 601 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | 589 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); |
| 602 SessionsTable::const_iterator iter = sessions_.find(session_id); | 590 SessionsTable::const_iterator iter = sessions_.find(session_id); |
| 603 DCHECK(iter != sessions_.end()); | 591 DCHECK(iter != sessions_.end()); |
| 604 return iter->second; | 592 return iter->second; |
| 605 } | 593 } |
| 606 | 594 |
| 607 SpeechRecognitionEventListener* SpeechRecognitionManagerImpl::GetListener( | 595 SpeechRecognitionEventListener* SpeechRecognitionManagerImpl::GetListener( |
| 608 int session_id) const { | 596 int session_id) const { |
| 609 const Session& session = GetSession(session_id); | 597 Session* session = GetSession(session_id); |
| 610 return session.listener_is_active ? session.config.event_listener : NULL; | 598 return session->listener_is_active ? session->config.event_listener : NULL; |
| 611 } | 599 } |
| 612 | 600 |
| 613 SpeechRecognitionEventListener* | 601 SpeechRecognitionEventListener* |
| 614 SpeechRecognitionManagerImpl::GetDelegateListener() const { | 602 SpeechRecognitionManagerImpl::GetDelegateListener() const { |
| 615 return delegate_.get() ? delegate_->GetEventListener() : NULL; | 603 return delegate_.get() ? delegate_->GetEventListener() : NULL; |
| 616 } | 604 } |
| 617 | 605 |
| 618 const SpeechRecognitionSessionConfig& | 606 const SpeechRecognitionSessionConfig& |
| 619 SpeechRecognitionManagerImpl::GetSessionConfig(int session_id) const { | 607 SpeechRecognitionManagerImpl::GetSessionConfig(int session_id) const { |
| 620 return GetSession(session_id).config; | 608 return GetSession(session_id)->config; |
| 621 } | 609 } |
| 622 | 610 |
// Proxies to the browser-wide AudioManager: true if at least one audio
// capture device is available.
bool SpeechRecognitionManagerImpl::HasAudioInputDevices() {
  return BrowserMainLoop::GetAudioManager()->HasAudioInputDevices();
}
| 626 | 614 |
// Proxies to the browser-wide AudioManager. Judging by the API name this
// reports any in-process recording, not only speech sessions — confirm.
bool SpeechRecognitionManagerImpl::IsCapturingAudio() {
  return BrowserMainLoop::GetAudioManager()->IsRecordingInProcess();
}
| 630 | 618 |
| (...skipping 10 matching lines...) Expand all Loading... |
| 641 | 629 |
// A Session starts with an invalid id (presumably assigned by the creator
// right after construction — TODO confirm) and its listener active.
SpeechRecognitionManagerImpl::Session::Session()
    : id(kSessionIDInvalid),
      listener_is_active(true) {
}
| 646 | 634 |
// Empty out-of-line destructor; members release their own resources.
SpeechRecognitionManagerImpl::Session::~Session() {
}
| 649 | 637 |
| 650 } // namespace content | 638 } // namespace content |
| OLD | NEW |