OLD | NEW |
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "content/browser/speech/speech_recognition_manager_impl.h" | 5 #include "content/browser/speech/speech_recognition_manager_impl.h" |
6 | 6 |
7 #include "base/bind.h" | 7 #include "base/bind.h" |
8 #include "content/browser/browser_main_loop.h" | 8 #include "content/browser/browser_main_loop.h" |
9 #include "content/browser/renderer_host/media/media_stream_manager.h" | 9 #include "content/browser/renderer_host/media/media_stream_manager.h" |
| 10 #include "content/browser/renderer_host/media/media_stream_ui_proxy.h" |
10 #include "content/browser/speech/google_one_shot_remote_engine.h" | 11 #include "content/browser/speech/google_one_shot_remote_engine.h" |
11 #include "content/browser/speech/google_streaming_remote_engine.h" | 12 #include "content/browser/speech/google_streaming_remote_engine.h" |
12 #include "content/browser/speech/speech_recognition_engine.h" | 13 #include "content/browser/speech/speech_recognition_engine.h" |
13 #include "content/browser/speech/speech_recognizer.h" | 14 #include "content/browser/speech/speech_recognizer.h" |
14 #include "content/public/browser/browser_thread.h" | 15 #include "content/public/browser/browser_thread.h" |
15 #include "content/public/browser/content_browser_client.h" | 16 #include "content/public/browser/content_browser_client.h" |
16 #include "content/public/browser/resource_context.h" | 17 #include "content/public/browser/resource_context.h" |
17 #include "content/public/browser/speech_recognition_event_listener.h" | 18 #include "content/public/browser/speech_recognition_event_listener.h" |
18 #include "content/public/browser/speech_recognition_manager_delegate.h" | 19 #include "content/public/browser/speech_recognition_manager_delegate.h" |
19 #include "content/public/browser/speech_recognition_session_config.h" | 20 #include "content/public/browser/speech_recognition_session_config.h" |
(...skipping 41 matching lines...) |
61 delegate_(GetContentClient()->browser()-> | 62 delegate_(GetContentClient()->browser()-> |
62 GetSpeechRecognitionManagerDelegate()), | 63 GetSpeechRecognitionManagerDelegate()), |
63 ALLOW_THIS_IN_INITIALIZER_LIST(weak_factory_(this)) { | 64 ALLOW_THIS_IN_INITIALIZER_LIST(weak_factory_(this)) { |
64 DCHECK(!g_speech_recognition_manager_impl); | 65 DCHECK(!g_speech_recognition_manager_impl); |
65 g_speech_recognition_manager_impl = this; | 66 g_speech_recognition_manager_impl = this; |
66 } | 67 } |
67 | 68 |
68 SpeechRecognitionManagerImpl::~SpeechRecognitionManagerImpl() { | 69 SpeechRecognitionManagerImpl::~SpeechRecognitionManagerImpl() { |
69 DCHECK(g_speech_recognition_manager_impl); | 70 DCHECK(g_speech_recognition_manager_impl); |
70 g_speech_recognition_manager_impl = NULL; | 71 g_speech_recognition_manager_impl = NULL; |
71 // Recognition sessions will be aborted by the corresponding destructors. | 72 |
| 73 for (SessionsTable::iterator it = sessions_.begin(); it != sessions_.end(); |
| 74 ++it) { |
| 75 // MediaStreamUIProxy must be deleted on the IO thread. |
| 76 BrowserThread::DeleteSoon(BrowserThread::IO, FROM_HERE, |
| 77 it->second->ui.release()); |
| 78 delete it->second; |
| 79 } |
72 sessions_.clear(); | 80 sessions_.clear(); |
73 } | 81 } |
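The destructor change above reflects the new ownership model: sessions are now heap-allocated and stored as raw pointers in the map, and the MediaStreamUIProxy member is handed to BrowserThread::DeleteSoon because, per the comment, it must be deleted on the IO thread. A minimal sketch of the Session struct and table this file appears to rely on, reconstructed only from the members used in this diff (the real declaration is in speech_recognition_manager_impl.h, which is not part of this section):

    // Sketch reconstructed from usage in this file; not the actual header.
    struct Session {
      Session();
      ~Session();
      int id;                                   // session identifier
      bool listener_is_active;                  // cleared in AbortAllSessionsForListener()
      SpeechRecognitionSessionConfig config;    // per-session configuration
      SpeechRecognitionSessionContext context;  // render process/view ids, label, devices
      scoped_refptr<SpeechRecognizer> recognizer;
      scoped_ptr<MediaStreamUIProxy> ui;        // must be deleted on the IO thread
    };
    typedef std::map<int, Session*> SessionsTable;  // values owned by the manager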
74 | 82 |
75 int SpeechRecognitionManagerImpl::CreateSession( | 83 int SpeechRecognitionManagerImpl::CreateSession( |
76 const SpeechRecognitionSessionConfig& config) { | 84 const SpeechRecognitionSessionConfig& config) { |
77 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | 85 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); |
78 | 86 |
79 const int session_id = GetNextSessionID(); | 87 const int session_id = GetNextSessionID(); |
80 DCHECK(!SessionExists(session_id)); | 88 DCHECK(!SessionExists(session_id)); |
81 // Set up the new session. | 89 // Set up the new session. |
82 Session& session = sessions_[session_id]; | 90 Session* session = new Session(); |
83 session.id = session_id; | 91 sessions_[session_id] = session; |
84 session.config = config; | 92 session->id = session_id; |
85 session.context = config.initial_context; | 93 session->config = config; |
| 94 session->context = config.initial_context; |
86 | 95 |
87 std::string hardware_info; | 96 std::string hardware_info; |
88 bool can_report_metrics = false; | 97 bool can_report_metrics = false; |
89 if (delegate_) | 98 if (delegate_) |
90 delegate_->GetDiagnosticInformation(&can_report_metrics, &hardware_info); | 99 delegate_->GetDiagnosticInformation(&can_report_metrics, &hardware_info); |
91 | 100 |
92 SpeechRecognitionEngineConfig remote_engine_config; | 101 SpeechRecognitionEngineConfig remote_engine_config; |
93 remote_engine_config.language = config.language; | 102 remote_engine_config.language = config.language; |
94 remote_engine_config.grammars = config.grammars; | 103 remote_engine_config.grammars = config.grammars; |
95 remote_engine_config.audio_sample_rate = SpeechRecognizer::kAudioSampleRate; | 104 remote_engine_config.audio_sample_rate = SpeechRecognizer::kAudioSampleRate; |
(...skipping 14 matching lines...) |
110 } else { | 119 } else { |
111 google_remote_engine = | 120 google_remote_engine = |
112 new GoogleStreamingRemoteEngine(config.url_request_context_getter); | 121 new GoogleStreamingRemoteEngine(config.url_request_context_getter); |
113 } | 122 } |
114 | 123 |
115 google_remote_engine->SetConfig(remote_engine_config); | 124 google_remote_engine->SetConfig(remote_engine_config); |
116 | 125 |
117 // The legacy API cannot use continuous mode. | 126 // The legacy API cannot use continuous mode. |
118 DCHECK(!config.is_legacy_api || !config.continuous); | 127 DCHECK(!config.is_legacy_api || !config.continuous); |
119 | 128 |
120 session.recognizer = new SpeechRecognizer( | 129 session->recognizer = new SpeechRecognizer( |
121 this, | 130 this, |
122 session_id, | 131 session_id, |
123 !config.continuous, | 132 !config.continuous, |
124 google_remote_engine); | 133 google_remote_engine); |
125 return session_id; | 134 return session_id; |
126 } | 135 } |
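CreateSession() only builds the session and its recognizer; nothing runs until StartSession() is called with the returned id. A hypothetical caller sketch of that two-step contract, assuming the usual SpeechRecognitionManager::GetInstance() singleton accessor; the field values and the |context|, |getter| and |listener| variables are illustrative placeholders, not taken from a real dispatcher host:

    // Hypothetical caller, IO thread. |context|, |getter| and |listener| stand in
    // for values a real dispatcher host would supply.
    SpeechRecognitionSessionConfig config;
    config.language = "en-US";
    config.continuous = false;
    config.is_legacy_api = true;              // legacy API: continuous mode not allowed
    config.initial_context = context;
    config.url_request_context_getter = getter;
    config.event_listener = listener;         // a SpeechRecognitionEventListener*
    SpeechRecognitionManager* manager = SpeechRecognitionManager::GetInstance();
    const int session_id = manager->CreateSession(config);
    manager->StartSession(session_id);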
127 | 136 |
128 void SpeechRecognitionManagerImpl::StartSession(int session_id) { | 137 void SpeechRecognitionManagerImpl::StartSession(int session_id) { |
129 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | 138 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); |
130 if (!SessionExists(session_id)) | 139 if (!SessionExists(session_id)) |
(...skipping 24 matching lines...) |
155 return; | 164 return; |
156 | 165 |
157 #if defined(OS_IOS) | 166 #if defined(OS_IOS) |
158 // On iOS, voice search can only be initiated by clear user action and thus | 167 // On iOS, voice search can only be initiated by clear user action and thus |
159 // it is always allowed. | 168 // it is always allowed. |
160 DCHECK(!ask_user && is_allowed); | 169 DCHECK(!ask_user && is_allowed); |
161 #else | 170 #else |
162 if (ask_user) { | 171 if (ask_user) { |
163 SessionsTable::iterator iter = sessions_.find(session_id); | 172 SessionsTable::iterator iter = sessions_.find(session_id); |
164 DCHECK(iter != sessions_.end()); | 173 DCHECK(iter != sessions_.end()); |
165 SpeechRecognitionSessionContext& context = iter->second.context; | 174 SpeechRecognitionSessionContext& context = iter->second->context; |
166 context.label = | 175 context.label = |
167 BrowserMainLoop::GetMediaStreamManager()->MakeMediaAccessRequest( | 176 BrowserMainLoop::GetMediaStreamManager()->MakeMediaAccessRequest( |
168 context.render_process_id, | 177 context.render_process_id, |
169 context.render_view_id, | 178 context.render_view_id, |
170 StreamOptions(MEDIA_DEVICE_AUDIO_CAPTURE, MEDIA_NO_SERVICE), | 179 StreamOptions(MEDIA_DEVICE_AUDIO_CAPTURE, MEDIA_NO_SERVICE), |
171 GURL(context.context_name), | 180 GURL(context.context_name), |
172 base::Bind( | 181 base::Bind( |
173 &SpeechRecognitionManagerImpl::MediaRequestPermissionCallback, | 182 &SpeechRecognitionManagerImpl::MediaRequestPermissionCallback, |
174 weak_factory_.GetWeakPtr())); | 183 weak_factory_.GetWeakPtr(), session_id)); |
175 | |
176 return; | 184 return; |
177 } | 185 } |
178 #endif // defined(OS_IOS) | 186 #endif // defined(OS_IOS) |
179 | 187 |
180 if (is_allowed) { | 188 if (is_allowed) { |
181 MessageLoop::current()->PostTask(FROM_HERE, | 189 MessageLoop::current()->PostTask(FROM_HERE, |
182 base::Bind(&SpeechRecognitionManagerImpl::DispatchEvent, | 190 base::Bind(&SpeechRecognitionManagerImpl::DispatchEvent, |
183 weak_factory_.GetWeakPtr(), session_id, EVENT_START)); | 191 weak_factory_.GetWeakPtr(), session_id, EVENT_START)); |
184 } else { | 192 } else { |
185 OnRecognitionError(session_id, SpeechRecognitionError( | 193 OnRecognitionError(session_id, SpeechRecognitionError( |
186 SPEECH_RECOGNITION_ERROR_NOT_ALLOWED)); | 194 SPEECH_RECOGNITION_ERROR_NOT_ALLOWED)); |
187 MessageLoop::current()->PostTask(FROM_HERE, | 195 MessageLoop::current()->PostTask(FROM_HERE, |
188 base::Bind(&SpeechRecognitionManagerImpl::DispatchEvent, | 196 base::Bind(&SpeechRecognitionManagerImpl::DispatchEvent, |
189 weak_factory_.GetWeakPtr(), session_id, EVENT_ABORT)); | 197 weak_factory_.GetWeakPtr(), session_id, EVENT_ABORT)); |
190 } | 198 } |
191 } | 199 } |
192 | 200 |
193 void SpeechRecognitionManagerImpl::MediaRequestPermissionCallback( | 201 void SpeechRecognitionManagerImpl::MediaRequestPermissionCallback( |
194 const std::string& label, const MediaStreamDevices& devices) { | 202 int session_id, |
| 203 const MediaStreamDevices& devices, |
| 204 scoped_ptr<MediaStreamUIProxy> stream_ui) { |
195 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | 205 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); |
196 for (SessionsTable::iterator iter = sessions_.begin(); | |
197 iter != sessions_.end(); ++iter) { | |
198 if (iter->second.context.label == label) { | |
199 bool is_allowed = false; | |
200 if (!devices.empty()) { | |
201 // Copy the approved devices array to the context for UI indication. | |
202 iter->second.context.devices = devices; | |
203 is_allowed = true; | |
204 } | |
205 | 206 |
206 // Clear the label to indicate the request has been done. | 207 SessionsTable::iterator iter = sessions_.find(session_id); |
207 iter->second.context.label.clear(); | 208 if (iter == sessions_.end()) |
| 209 return; |
208 | 210 |
209 // Notify the recognition about the request result. | 211 bool is_allowed = !devices.empty(); |
210 RecognitionAllowedCallback(iter->first, false, is_allowed); | 212 if (is_allowed) { |
211 break; | 213 // Copy the approved devices array to the context for UI indication. |
212 } | 214 iter->second->context.devices = devices; |
| 215 |
| 216 // Save the UI object. |
| 217 iter->second->ui = stream_ui.Pass(); |
213 } | 218 } |
| 219 |
| 220 // Clear the label to indicate the request has been done. |
| 221 iter->second->context.label.clear(); |
| 222 |
| 223 // Notify the recognition about the request result. |
| 224 RecognitionAllowedCallback(iter->first, false, is_allowed); |
214 } | 225 } |
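The callback rewrite above replaces the old label scan with a session_id that is bound at request time: base::Bind pre-applies the leading arguments, so the callback handed to MakeMediaAccessRequest only has to be invoked with the remaining ones. A rough sketch of the shape involved, assuming MediaStreamManager runs the callback with the approved device list and the UI proxy (as the new signature suggests); an empty device list means the request was denied:

    // What StartSession() binds (weak pointer + session id are pre-applied):
    base::Callback<void(const MediaStreamDevices&, scoped_ptr<MediaStreamUIProxy>)>
        callback = base::Bind(
            &SpeechRecognitionManagerImpl::MediaRequestPermissionCallback,
            weak_factory_.GetWeakPtr(), session_id);

    // Later, MediaStreamManager runs it. On approval the session keeps the
    // MediaStreamUIProxy so it can call OnStarted() when capture begins and
    // reset it on abort/stop; on denial |devices| is empty.
    callback.Run(devices, stream_ui.Pass());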
215 | 226 |
216 void SpeechRecognitionManagerImpl::AbortSession(int session_id) { | 227 void SpeechRecognitionManagerImpl::AbortSession(int session_id) { |
217 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | 228 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); |
218 if (!SessionExists(session_id)) | 229 if (!SessionExists(session_id)) |
219 return; | 230 return; |
220 | 231 |
221 #if !defined(OS_IOS) | 232 SessionsTable::iterator iter = sessions_.find(session_id); |
222 const SpeechRecognitionSessionContext& context = | 233 iter->second->ui.reset(); |
223 GetSessionContext(session_id); | |
224 if (!context.label.empty()) | |
225 BrowserMainLoop::GetMediaStreamManager()->CancelRequest(context.label); | |
226 #endif // !defined(OS_IOS) | |
227 | 234 |
228 MessageLoop::current()->PostTask(FROM_HERE, | 235 MessageLoop::current()->PostTask(FROM_HERE, |
229 base::Bind(&SpeechRecognitionManagerImpl::DispatchEvent, | 236 base::Bind(&SpeechRecognitionManagerImpl::DispatchEvent, |
230 weak_factory_.GetWeakPtr(), session_id, EVENT_ABORT)); | 237 weak_factory_.GetWeakPtr(), session_id, EVENT_ABORT)); |
231 } | 238 } |
232 | 239 |
233 void SpeechRecognitionManagerImpl::StopAudioCaptureForSession(int session_id) { | 240 void SpeechRecognitionManagerImpl::StopAudioCaptureForSession(int session_id) { |
234 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | 241 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); |
235 if (!SessionExists(session_id)) | 242 if (!SessionExists(session_id)) |
236 return; | 243 return; |
237 | 244 |
238 #if !defined(OS_IOS) | 245 SessionsTable::iterator iter = sessions_.find(session_id); |
239 const SpeechRecognitionSessionContext& context = | 246 iter->second->ui.reset(); |
240 GetSessionContext(session_id); | |
241 if (!context.label.empty()) | |
242 BrowserMainLoop::GetMediaStreamManager()->CancelRequest(context.label); | |
243 #endif // !defined(OS_IOS) | |
244 | 247 |
245 MessageLoop::current()->PostTask(FROM_HERE, | 248 MessageLoop::current()->PostTask(FROM_HERE, |
246 base::Bind(&SpeechRecognitionManagerImpl::DispatchEvent, | 249 base::Bind(&SpeechRecognitionManagerImpl::DispatchEvent, |
247 weak_factory_.GetWeakPtr(), session_id, EVENT_STOP_CAPTURE)); | 250 weak_factory_.GetWeakPtr(), session_id, EVENT_STOP_CAPTURE)); |
248 } | 251 } |
249 | 252 |
250 // Here begins the SpeechRecognitionEventListener interface implementation, | 253 // Here begins the SpeechRecognitionEventListener interface implementation, |
251 // which will simply relay the events to the proper listener registered for the | 254 // which will simply relay the events to the proper listener registered for the |
252 // particular session (most likely InputTagSpeechDispatcherHost) and to the | 255 // particular session (most likely InputTagSpeechDispatcherHost) and to the |
253 // catch-all listener provided by the delegate (if any). | 256 // catch-all listener provided by the delegate (if any). |
254 | 257 |
255 void SpeechRecognitionManagerImpl::OnRecognitionStart(int session_id) { | 258 void SpeechRecognitionManagerImpl::OnRecognitionStart(int session_id) { |
256 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | 259 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); |
257 if (!SessionExists(session_id)) | 260 if (!SessionExists(session_id)) |
258 return; | 261 return; |
259 | 262 |
260 #if !defined(OS_IOS) | 263 SessionsTable::iterator iter = sessions_.find(session_id); |
261 const SpeechRecognitionSessionContext& context = | 264 if (iter->second->ui) { |
262 GetSessionContext(session_id); | 265 // Notify the UI that the devices are being used. |
263 if (!context.devices.empty()) { | 266 iter->second->ui->OnStarted(base::Closure()); |
264 // Notify the UI the devices are being used. | |
265 BrowserMainLoop::GetMediaStreamManager()->NotifyUIDevicesOpened( | |
266 context.label); | |
267 } | 267 } |
268 #endif // !defined(OS_IOS) | |
269 | 268 |
270 DCHECK_EQ(primary_session_id_, session_id); | 269 DCHECK_EQ(primary_session_id_, session_id); |
271 if (SpeechRecognitionEventListener* delegate_listener = GetDelegateListener()) | 270 if (SpeechRecognitionEventListener* delegate_listener = GetDelegateListener()) |
272 delegate_listener->OnRecognitionStart(session_id); | 271 delegate_listener->OnRecognitionStart(session_id); |
273 if (SpeechRecognitionEventListener* listener = GetListener(session_id)) | 272 if (SpeechRecognitionEventListener* listener = GetListener(session_id)) |
274 listener->OnRecognitionStart(session_id); | 273 listener->OnRecognitionStart(session_id); |
275 } | 274 } |
276 | 275 |
277 void SpeechRecognitionManagerImpl::OnAudioStart(int session_id) { | 276 void SpeechRecognitionManagerImpl::OnAudioStart(int session_id) { |
278 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | 277 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); |
(...skipping 90 matching lines...) |
369 if (SpeechRecognitionEventListener* delegate_listener = GetDelegateListener()) | 368 if (SpeechRecognitionEventListener* delegate_listener = GetDelegateListener()) |
370 delegate_listener->OnAudioLevelsChange(session_id, volume, noise_volume); | 369 delegate_listener->OnAudioLevelsChange(session_id, volume, noise_volume); |
371 if (SpeechRecognitionEventListener* listener = GetListener(session_id)) | 370 if (SpeechRecognitionEventListener* listener = GetListener(session_id)) |
372 listener->OnAudioLevelsChange(session_id, volume, noise_volume); | 371 listener->OnAudioLevelsChange(session_id, volume, noise_volume); |
373 } | 372 } |
374 | 373 |
375 void SpeechRecognitionManagerImpl::OnRecognitionEnd(int session_id) { | 374 void SpeechRecognitionManagerImpl::OnRecognitionEnd(int session_id) { |
376 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | 375 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); |
377 if (!SessionExists(session_id)) | 376 if (!SessionExists(session_id)) |
378 return; | 377 return; |
379 #if !defined(OS_IOS) | |
380 const SpeechRecognitionSessionContext& context = | |
381 GetSessionContext(session_id); | |
382 if (!context.devices.empty()) { | |
383 // Notify the UI the devices has been closed. | |
384 BrowserMainLoop::GetMediaStreamManager()->NotifyUIDevicesClosed( | |
385 context.label); | |
386 } | |
387 #endif // !defined(OS_IOS) | |
388 | 378 |
389 if (SpeechRecognitionEventListener* delegate_listener = GetDelegateListener()) | 379 if (SpeechRecognitionEventListener* delegate_listener = GetDelegateListener()) |
390 delegate_listener->OnRecognitionEnd(session_id); | 380 delegate_listener->OnRecognitionEnd(session_id); |
391 if (SpeechRecognitionEventListener* listener = GetListener(session_id)) | 381 if (SpeechRecognitionEventListener* listener = GetListener(session_id)) |
392 listener->OnRecognitionEnd(session_id); | 382 listener->OnRecognitionEnd(session_id); |
393 MessageLoop::current()->PostTask(FROM_HERE, | 383 MessageLoop::current()->PostTask(FROM_HERE, |
394 base::Bind(&SpeechRecognitionManagerImpl::DispatchEvent, | 384 base::Bind(&SpeechRecognitionManagerImpl::DispatchEvent, |
395 weak_factory_.GetWeakPtr(), | 385 weak_factory_.GetWeakPtr(), |
396 session_id, | 386 session_id, |
397 EVENT_RECOGNITION_ENDED)); | 387 EVENT_RECOGNITION_ENDED)); |
398 } | 388 } |
399 | 389 |
400 int SpeechRecognitionManagerImpl::GetSession( | 390 int SpeechRecognitionManagerImpl::GetSession( |
401 int render_process_id, int render_view_id, int request_id) const { | 391 int render_process_id, int render_view_id, int request_id) const { |
402 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | 392 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); |
403 SessionsTable::const_iterator iter; | 393 SessionsTable::const_iterator iter; |
404 for(iter = sessions_.begin(); iter != sessions_.end(); ++iter) { | 394 for(iter = sessions_.begin(); iter != sessions_.end(); ++iter) { |
405 const int session_id = iter->first; | 395 const int session_id = iter->first; |
406 const SpeechRecognitionSessionContext& context = iter->second.context; | 396 const SpeechRecognitionSessionContext& context = iter->second->context; |
407 if (context.render_process_id == render_process_id && | 397 if (context.render_process_id == render_process_id && |
408 context.render_view_id == render_view_id && | 398 context.render_view_id == render_view_id && |
409 context.request_id == request_id) { | 399 context.request_id == request_id) { |
410 return session_id; | 400 return session_id; |
411 } | 401 } |
412 } | 402 } |
413 return kSessionIDInvalid; | 403 return kSessionIDInvalid; |
414 } | 404 } |
415 | 405 |
416 SpeechRecognitionSessionContext | 406 SpeechRecognitionSessionContext |
417 SpeechRecognitionManagerImpl::GetSessionContext(int session_id) const { | 407 SpeechRecognitionManagerImpl::GetSessionContext(int session_id) const { |
418 return GetSession(session_id).context; | 408 return GetSession(session_id)->context; |
419 } | 409 } |
420 | 410 |
421 void SpeechRecognitionManagerImpl::AbortAllSessionsForListener( | 411 void SpeechRecognitionManagerImpl::AbortAllSessionsForListener( |
422 SpeechRecognitionEventListener* listener) { | 412 SpeechRecognitionEventListener* listener) { |
423 // This method gracefully destroys sessions for the listener. However, since | 413 // This method gracefully destroys sessions for the listener. However, since |
424 // the listener itself is likely to be destroyed after this call, we avoid | 414 // the listener itself is likely to be destroyed after this call, we avoid |
425 // dispatching further events to it by clearing the |listener_is_active| flag. | 415 // dispatching further events to it by clearing the |listener_is_active| flag. |
426 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | 416 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); |
427 for (SessionsTable::iterator it = sessions_.begin(); it != sessions_.end(); | 417 for (SessionsTable::iterator it = sessions_.begin(); it != sessions_.end(); |
428 ++it) { | 418 ++it) { |
429 Session& session = it->second; | 419 Session* session = it->second; |
430 if (session.config.event_listener == listener) { | 420 if (session->config.event_listener == listener) { |
431 AbortSession(session.id); | 421 AbortSession(session->id); |
432 session.listener_is_active = false; | 422 session->listener_is_active = false; |
433 } | 423 } |
434 } | 424 } |
435 } | 425 } |
436 | 426 |
437 void SpeechRecognitionManagerImpl::AbortAllSessionsForRenderView( | 427 void SpeechRecognitionManagerImpl::AbortAllSessionsForRenderView( |
438 int render_process_id, | 428 int render_process_id, |
439 int render_view_id) { | 429 int render_view_id) { |
440 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | 430 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); |
441 for (SessionsTable::iterator it = sessions_.begin(); it != sessions_.end(); | 431 for (SessionsTable::iterator it = sessions_.begin(); it != sessions_.end(); |
442 ++it) { | 432 ++it) { |
443 Session& session = it->second; | 433 Session* session = it->second; |
444 if (session.context.render_process_id == render_process_id && | 434 if (session->context.render_process_id == render_process_id && |
445 session.context.render_view_id == render_view_id) { | 435 session->context.render_view_id == render_view_id) { |
446 AbortSession(session.id); | 436 AbortSession(session->id); |
447 } | 437 } |
448 } | 438 } |
449 } | 439 } |
450 | 440 |
451 // ----------------------- Core FSM implementation --------------------------- | 441 // ----------------------- Core FSM implementation --------------------------- |
452 void SpeechRecognitionManagerImpl::DispatchEvent(int session_id, | 442 void SpeechRecognitionManagerImpl::DispatchEvent(int session_id, |
453 FSMEvent event) { | 443 FSMEvent event) { |
454 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | 444 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); |
455 | 445 |
456 // There are some corner cases in which the session might be deleted (due to | 446 // There are some corner cases in which the session might be deleted (due to |
457 // an EndRecognition event) between a request (e.g. Abort) and its dispatch. | 447 // an EndRecognition event) between a request (e.g. Abort) and its dispatch. |
458 if (!SessionExists(session_id)) | 448 if (!SessionExists(session_id)) |
459 return; | 449 return; |
460 | 450 |
461 const Session& session = GetSession(session_id); | 451 Session* session = GetSession(session_id); |
462 FSMState session_state = GetSessionState(session_id); | 452 FSMState session_state = GetSessionState(session_id); |
463 DCHECK_LE(session_state, SESSION_STATE_MAX_VALUE); | 453 DCHECK_LE(session_state, SESSION_STATE_MAX_VALUE); |
464 DCHECK_LE(event, EVENT_MAX_VALUE); | 454 DCHECK_LE(event, EVENT_MAX_VALUE); |
465 | 455 |
466 // Event dispatching must be sequential, otherwise it will break all the rules | 456 // Event dispatching must be sequential, otherwise it will break all the rules |
467 // and the assumptions of the finite state automata model. | 457 // and the assumptions of the finite state automata model. |
468 DCHECK(!is_dispatching_event_); | 458 DCHECK(!is_dispatching_event_); |
469 is_dispatching_event_ = true; | 459 is_dispatching_event_ = true; |
470 ExecuteTransitionAndGetNextState(session, session_state, event); | 460 ExecuteTransitionAndGetNextState(session, session_state, event); |
471 is_dispatching_event_ = false; | 461 is_dispatching_event_ = false; |
472 } | 462 } |
473 | 463 |
474 // This FSM handles the evolution of each session, from the viewpoint of the | 464 // This FSM handles the evolution of each session, from the viewpoint of the |
475 // interaction with the user (that may be either the browser end-user which | 465 // interaction with the user (that may be either the browser end-user which |
476 // interacts with UI bubbles, or JS developer intracting with JS methods). | 466 // interacts with UI bubbles, or JS developer intracting with JS methods). |
477 // All the events received by the SpeechRecognizer instances (one for each | 467 // All the events received by the SpeechRecognizer instances (one for each |
478 // session) are always routed to the SpeechRecognitionEventListener(s) | 468 // session) are always routed to the SpeechRecognitionEventListener(s) |
479 // regardless the choices taken in this FSM. | 469 // regardless the choices taken in this FSM. |
480 void SpeechRecognitionManagerImpl::ExecuteTransitionAndGetNextState( | 470 void SpeechRecognitionManagerImpl::ExecuteTransitionAndGetNextState( |
481 const Session& session, FSMState session_state, FSMEvent event) { | 471 Session* session, FSMState session_state, FSMEvent event) { |
482 // Note: since we're not tracking the state of the recognizer object, rather | 472 // Note: since we're not tracking the state of the recognizer object, rather |
483 // we're directly retrieving it (through GetSessionState), we see its events | 473 // we're directly retrieving it (through GetSessionState), we see its events |
484 // (that are AUDIO_ENDED and RECOGNITION_ENDED) after its state evolution | 474 // (that are AUDIO_ENDED and RECOGNITION_ENDED) after its state evolution |
485 // (e.g., when we receive the AUDIO_ENDED event, the recognizer has just | 475 // (e.g., when we receive the AUDIO_ENDED event, the recognizer has just |
486 // completed the transition from CAPTURING_AUDIO to WAITING_FOR_RESULT, thus | 476 // completed the transition from CAPTURING_AUDIO to WAITING_FOR_RESULT, thus |
487 // we perceive the AUDIO_ENDED event in WAITING_FOR_RESULT). | 477 // we perceive the AUDIO_ENDED event in WAITING_FOR_RESULT). |
488 // This makes the code below a bit tricky but avoids a lot of code for | 478 // This makes the code below a bit tricky but avoids a lot of code for |
489 // tracking and reconstructing asynchronously the state of the recognizer. | 479 // tracking and reconstructing asynchronously the state of the recognizer. |
490 switch (session_state) { | 480 switch (session_state) { |
491 case SESSION_STATE_IDLE: | 481 case SESSION_STATE_IDLE: |
492 switch (event) { | 482 switch (event) { |
493 case EVENT_START: | 483 case EVENT_START: |
494 return SessionStart(session); | 484 return SessionStart(*session); |
495 case EVENT_ABORT: | 485 case EVENT_ABORT: |
496 return SessionAbort(session); | 486 return SessionAbort(*session); |
497 case EVENT_RECOGNITION_ENDED: | 487 case EVENT_RECOGNITION_ENDED: |
498 return SessionDelete(session); | 488 return SessionDelete(session); |
499 case EVENT_STOP_CAPTURE: | 489 case EVENT_STOP_CAPTURE: |
500 return SessionStopAudioCapture(session); | 490 return SessionStopAudioCapture(*session); |
501 case EVENT_AUDIO_ENDED: | 491 case EVENT_AUDIO_ENDED: |
502 return; | 492 return; |
503 } | 493 } |
504 break; | 494 break; |
505 case SESSION_STATE_CAPTURING_AUDIO: | 495 case SESSION_STATE_CAPTURING_AUDIO: |
506 switch (event) { | 496 switch (event) { |
507 case EVENT_STOP_CAPTURE: | 497 case EVENT_STOP_CAPTURE: |
508 return SessionStopAudioCapture(session); | 498 return SessionStopAudioCapture(*session); |
509 case EVENT_ABORT: | 499 case EVENT_ABORT: |
510 return SessionAbort(session); | 500 return SessionAbort(*session); |
511 case EVENT_START: | 501 case EVENT_START: |
512 return; | 502 return; |
513 case EVENT_AUDIO_ENDED: | 503 case EVENT_AUDIO_ENDED: |
514 case EVENT_RECOGNITION_ENDED: | 504 case EVENT_RECOGNITION_ENDED: |
515 return NotFeasible(session, event); | 505 return NotFeasible(*session, event); |
516 } | 506 } |
517 break; | 507 break; |
518 case SESSION_STATE_WAITING_FOR_RESULT: | 508 case SESSION_STATE_WAITING_FOR_RESULT: |
519 switch (event) { | 509 switch (event) { |
520 case EVENT_ABORT: | 510 case EVENT_ABORT: |
521 return SessionAbort(session); | 511 return SessionAbort(*session); |
522 case EVENT_AUDIO_ENDED: | 512 case EVENT_AUDIO_ENDED: |
523 return ResetCapturingSessionId(session); | 513 return ResetCapturingSessionId(*session); |
524 case EVENT_START: | 514 case EVENT_START: |
525 case EVENT_STOP_CAPTURE: | 515 case EVENT_STOP_CAPTURE: |
526 return; | 516 return; |
527 case EVENT_RECOGNITION_ENDED: | 517 case EVENT_RECOGNITION_ENDED: |
528 return NotFeasible(session, event); | 518 return NotFeasible(*session, event); |
529 } | 519 } |
530 break; | 520 break; |
531 } | 521 } |
532 return NotFeasible(session, event); | 522 return NotFeasible(*session, event); |
533 } | 523 } |
534 | 524 |
535 SpeechRecognitionManagerImpl::FSMState | 525 SpeechRecognitionManagerImpl::FSMState |
536 SpeechRecognitionManagerImpl::GetSessionState(int session_id) const { | 526 SpeechRecognitionManagerImpl::GetSessionState(int session_id) const { |
537 const Session& session = GetSession(session_id); | 527 Session* session = GetSession(session_id); |
538 if (!session.recognizer.get() || !session.recognizer->IsActive()) | 528 if (!session->recognizer.get() || !session->recognizer->IsActive()) |
539 return SESSION_STATE_IDLE; | 529 return SESSION_STATE_IDLE; |
540 if (session.recognizer->IsCapturingAudio()) | 530 if (session->recognizer->IsCapturingAudio()) |
541 return SESSION_STATE_CAPTURING_AUDIO; | 531 return SESSION_STATE_CAPTURING_AUDIO; |
542 return SESSION_STATE_WAITING_FOR_RESULT; | 532 return SESSION_STATE_WAITING_FOR_RESULT; |
543 } | 533 } |
544 | 534 |
545 // ----------- Contract for all the FSM evolution functions below ------------- | 535 // ----------- Contract for all the FSM evolution functions below ------------- |
546 // - Are guaranteed to be executed in the IO thread; | 536 // - Are guaranteed to be executed in the IO thread; |
547 // - Are guaranteed to be not reentrant (themselves and each other); | 537 // - Are guaranteed to be not reentrant (themselves and each other); |
548 | 538 |
549 void SpeechRecognitionManagerImpl::SessionStart(const Session& session) { | 539 void SpeechRecognitionManagerImpl::SessionStart(const Session& session) { |
550 DCHECK_EQ(primary_session_id_, session.id); | 540 DCHECK_EQ(primary_session_id_, session.id); |
(...skipping 12 matching lines...) Expand all Loading... |
563 DCHECK(session.recognizer.get()); | 553 DCHECK(session.recognizer.get()); |
564 session.recognizer->StopAudioCapture(); | 554 session.recognizer->StopAudioCapture(); |
565 } | 555 } |
566 | 556 |
567 void SpeechRecognitionManagerImpl::ResetCapturingSessionId( | 557 void SpeechRecognitionManagerImpl::ResetCapturingSessionId( |
568 const Session& session) { | 558 const Session& session) { |
569 DCHECK_EQ(primary_session_id_, session.id); | 559 DCHECK_EQ(primary_session_id_, session.id); |
570 primary_session_id_ = kSessionIDInvalid; | 560 primary_session_id_ = kSessionIDInvalid; |
571 } | 561 } |
572 | 562 |
573 void SpeechRecognitionManagerImpl::SessionDelete(const Session& session) { | 563 void SpeechRecognitionManagerImpl::SessionDelete(Session* session) { |
574 DCHECK(session.recognizer == NULL || !session.recognizer->IsActive()); | 564 DCHECK(session->recognizer == NULL || !session->recognizer->IsActive()); |
575 if (primary_session_id_ == session.id) | 565 if (primary_session_id_ == session->id) |
576 primary_session_id_ = kSessionIDInvalid; | 566 primary_session_id_ = kSessionIDInvalid; |
577 sessions_.erase(session.id); | 567 sessions_.erase(session->id); |
| 568 delete session; |
578 } | 569 } |
579 | 570 |
580 void SpeechRecognitionManagerImpl::NotFeasible(const Session& session, | 571 void SpeechRecognitionManagerImpl::NotFeasible(const Session& session, |
581 FSMEvent event) { | 572 FSMEvent event) { |
582 NOTREACHED() << "Unfeasible event " << event | 573 NOTREACHED() << "Unfeasible event " << event |
583 << " in state " << GetSessionState(session.id) | 574 << " in state " << GetSessionState(session.id) |
584 << " for session " << session.id; | 575 << " for session " << session.id; |
585 } | 576 } |
586 | 577 |
587 int SpeechRecognitionManagerImpl::GetNextSessionID() { | 578 int SpeechRecognitionManagerImpl::GetNextSessionID() { |
588 ++last_session_id_; | 579 ++last_session_id_; |
589 // Deal with wrapping of last_session_id_. (How civilized). | 580 // Deal with wrapping of last_session_id_. (How civilized). |
590 if (last_session_id_ <= 0) | 581 if (last_session_id_ <= 0) |
591 last_session_id_ = 1; | 582 last_session_id_ = 1; |
592 return last_session_id_; | 583 return last_session_id_; |
593 } | 584 } |
594 | 585 |
595 bool SpeechRecognitionManagerImpl::SessionExists(int session_id) const { | 586 bool SpeechRecognitionManagerImpl::SessionExists(int session_id) const { |
596 return sessions_.find(session_id) != sessions_.end(); | 587 return sessions_.find(session_id) != sessions_.end(); |
597 } | 588 } |
598 | 589 |
599 const SpeechRecognitionManagerImpl::Session& | 590 SpeechRecognitionManagerImpl::Session* |
600 SpeechRecognitionManagerImpl::GetSession(int session_id) const { | 591 SpeechRecognitionManagerImpl::GetSession(int session_id) const { |
601 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | 592 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); |
602 SessionsTable::const_iterator iter = sessions_.find(session_id); | 593 SessionsTable::const_iterator iter = sessions_.find(session_id); |
603 DCHECK(iter != sessions_.end()); | 594 DCHECK(iter != sessions_.end()); |
604 return iter->second; | 595 return iter->second; |
605 } | 596 } |
606 | 597 |
607 SpeechRecognitionEventListener* SpeechRecognitionManagerImpl::GetListener( | 598 SpeechRecognitionEventListener* SpeechRecognitionManagerImpl::GetListener( |
608 int session_id) const { | 599 int session_id) const { |
609 const Session& session = GetSession(session_id); | 600 Session* session = GetSession(session_id); |
610 return session.listener_is_active ? session.config.event_listener : NULL; | 601 return session->listener_is_active ? session->config.event_listener : NULL; |
611 } | 602 } |
612 | 603 |
613 SpeechRecognitionEventListener* | 604 SpeechRecognitionEventListener* |
614 SpeechRecognitionManagerImpl::GetDelegateListener() const { | 605 SpeechRecognitionManagerImpl::GetDelegateListener() const { |
615 return delegate_.get() ? delegate_->GetEventListener() : NULL; | 606 return delegate_.get() ? delegate_->GetEventListener() : NULL; |
616 } | 607 } |
617 | 608 |
618 const SpeechRecognitionSessionConfig& | 609 const SpeechRecognitionSessionConfig& |
619 SpeechRecognitionManagerImpl::GetSessionConfig(int session_id) const { | 610 SpeechRecognitionManagerImpl::GetSessionConfig(int session_id) const { |
620 return GetSession(session_id).config; | 611 return GetSession(session_id)->config; |
621 } | 612 } |
622 | 613 |
623 bool SpeechRecognitionManagerImpl::HasAudioInputDevices() { | 614 bool SpeechRecognitionManagerImpl::HasAudioInputDevices() { |
624 return BrowserMainLoop::GetAudioManager()->HasAudioInputDevices(); | 615 return BrowserMainLoop::GetAudioManager()->HasAudioInputDevices(); |
625 } | 616 } |
626 | 617 |
627 bool SpeechRecognitionManagerImpl::IsCapturingAudio() { | 618 bool SpeechRecognitionManagerImpl::IsCapturingAudio() { |
628 return BrowserMainLoop::GetAudioManager()->IsRecordingInProcess(); | 619 return BrowserMainLoop::GetAudioManager()->IsRecordingInProcess(); |
629 } | 620 } |
630 | 621 |
(...skipping 10 matching lines...) |
641 | 632 |
642 SpeechRecognitionManagerImpl::Session::Session() | 633 SpeechRecognitionManagerImpl::Session::Session() |
643 : id(kSessionIDInvalid), | 634 : id(kSessionIDInvalid), |
644 listener_is_active(true) { | 635 listener_is_active(true) { |
645 } | 636 } |
646 | 637 |
647 SpeechRecognitionManagerImpl::Session::~Session() { | 638 SpeechRecognitionManagerImpl::Session::~Session() { |
648 } | 639 } |
649 | 640 |
650 } // namespace content | 641 } // namespace content |