OLD | NEW |
1 // Copyright (c) 2013 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2013 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "content/browser/speech/speech_recognition_manager_impl.h" | 5 #include "content/browser/speech/speech_recognition_manager_impl.h" |
6 | 6 |
7 #include "base/bind.h" | 7 #include "base/bind.h" |
8 #include "content/browser/browser_main_loop.h" | 8 #include "content/browser/browser_main_loop.h" |
9 #include "content/browser/renderer_host/media/media_stream_manager.h" | 9 #include "content/browser/renderer_host/media/media_stream_manager.h" |
| 10 #include "content/browser/renderer_host/media/media_stream_ui_proxy.h" |
10 #include "content/browser/speech/google_one_shot_remote_engine.h" | 11 #include "content/browser/speech/google_one_shot_remote_engine.h" |
11 #include "content/browser/speech/google_streaming_remote_engine.h" | 12 #include "content/browser/speech/google_streaming_remote_engine.h" |
12 #include "content/browser/speech/speech_recognition_engine.h" | 13 #include "content/browser/speech/speech_recognition_engine.h" |
13 #include "content/browser/speech/speech_recognizer_impl.h" | 14 #include "content/browser/speech/speech_recognizer_impl.h" |
14 #include "content/public/browser/browser_thread.h" | 15 #include "content/public/browser/browser_thread.h" |
15 #include "content/public/browser/content_browser_client.h" | 16 #include "content/public/browser/content_browser_client.h" |
16 #include "content/public/browser/resource_context.h" | 17 #include "content/public/browser/resource_context.h" |
17 #include "content/public/browser/speech_recognition_event_listener.h" | 18 #include "content/public/browser/speech_recognition_event_listener.h" |
18 #include "content/public/browser/speech_recognition_manager_delegate.h" | 19 #include "content/public/browser/speech_recognition_manager_delegate.h" |
19 #include "content/public/browser/speech_recognition_session_config.h" | 20 #include "content/public/browser/speech_recognition_session_config.h" |
(...skipping 41 matching lines...) |
61 delegate_(GetContentClient()->browser()-> | 62 delegate_(GetContentClient()->browser()-> |
62 GetSpeechRecognitionManagerDelegate()), | 63 GetSpeechRecognitionManagerDelegate()), |
63 weak_factory_(this) { | 64 weak_factory_(this) { |
64 DCHECK(!g_speech_recognition_manager_impl); | 65 DCHECK(!g_speech_recognition_manager_impl); |
65 g_speech_recognition_manager_impl = this; | 66 g_speech_recognition_manager_impl = this; |
66 } | 67 } |
67 | 68 |
68 SpeechRecognitionManagerImpl::~SpeechRecognitionManagerImpl() { | 69 SpeechRecognitionManagerImpl::~SpeechRecognitionManagerImpl() { |
69 DCHECK(g_speech_recognition_manager_impl); | 70 DCHECK(g_speech_recognition_manager_impl); |
70 g_speech_recognition_manager_impl = NULL; | 71 g_speech_recognition_manager_impl = NULL; |
71 // Recognition sessions will be aborted by the corresponding destructors. | 72 |
| 73 for (SessionsTable::iterator it = sessions_.begin(); it != sessions_.end(); |
| 74 ++it) { |
| 75 // MediaStreamUIProxy must be deleted on the IO thread. |
| 76 BrowserThread::DeleteSoon(BrowserThread::IO, FROM_HERE, |
| 77 it->second->ui.release()); |
| 78 delete it->second; |
| 79 } |
72 sessions_.clear(); | 80 sessions_.clear(); |
73 } | 81 } |
74 | 82 |
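The destructor above now hands each session's MediaStreamUIProxy back to the IO thread before deleting the Session itself. A minimal sketch of that DeleteSoon idiom, assuming a hypothetical IO-thread-affine class named IOBoundObject (only BrowserThread::DeleteSoon, FROM_HERE and scoped_ptr come from the code above):

    #include "content/public/browser/browser_thread.h"

    // Ownership leaves the scoped_ptr via release(), so the destructor runs
    // on the IO thread instead of whatever thread calls this helper.
    void DeleteOnIOThread(scoped_ptr<IOBoundObject> object) {
      content::BrowserThread::DeleteSoon(content::BrowserThread::IO,
                                         FROM_HERE,
                                         object.release());
    }
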
75 int SpeechRecognitionManagerImpl::CreateSession( | 83 int SpeechRecognitionManagerImpl::CreateSession( |
76 const SpeechRecognitionSessionConfig& config) { | 84 const SpeechRecognitionSessionConfig& config) { |
77 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | 85 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); |
78 | 86 |
79 const int session_id = GetNextSessionID(); | 87 const int session_id = GetNextSessionID(); |
80 DCHECK(!SessionExists(session_id)); | 88 DCHECK(!SessionExists(session_id)); |
81 // Set up the new session. | 89 // Set up the new session. |
82 Session& session = sessions_[session_id]; | 90 Session* session = new Session(); |
83 session.id = session_id; | 91 sessions_[session_id] = session; |
84 session.config = config; | 92 session->id = session_id; |
85 session.context = config.initial_context; | 93 session->config = config; |
| 94 session->context = config.initial_context; |
86 | 95 |
87 std::string hardware_info; | 96 std::string hardware_info; |
88 bool can_report_metrics = false; | 97 bool can_report_metrics = false; |
89 if (delegate_) | 98 if (delegate_) |
90 delegate_->GetDiagnosticInformation(&can_report_metrics, &hardware_info); | 99 delegate_->GetDiagnosticInformation(&can_report_metrics, &hardware_info); |
91 | 100 |
92 // The legacy API cannot use continuous mode. | 101 // The legacy API cannot use continuous mode. |
93 DCHECK(!config.is_legacy_api || !config.continuous); | 102 DCHECK(!config.is_legacy_api || !config.continuous); |
94 | 103 |
95 #if !defined(OS_ANDROID) | 104 #if !defined(OS_ANDROID) |
(...skipping 23 matching lines...) |
119 if (config.is_legacy_api) { | 128 if (config.is_legacy_api) { |
120 google_remote_engine = | 129 google_remote_engine = |
121 new GoogleOneShotRemoteEngine(config.url_request_context_getter.get()); | 130 new GoogleOneShotRemoteEngine(config.url_request_context_getter.get()); |
122 } else { | 131 } else { |
123 google_remote_engine = new GoogleStreamingRemoteEngine( | 132 google_remote_engine = new GoogleStreamingRemoteEngine( |
124 config.url_request_context_getter.get()); | 133 config.url_request_context_getter.get()); |
125 } | 134 } |
126 | 135 |
127 google_remote_engine->SetConfig(remote_engine_config); | 136 google_remote_engine->SetConfig(remote_engine_config); |
128 | 137 |
129 session.recognizer = new SpeechRecognizerImpl( | 138 session->recognizer = new SpeechRecognizerImpl( |
130 this, | 139 this, |
131 session_id, | 140 session_id, |
132 !config.continuous, | 141 !config.continuous, |
133 google_remote_engine); | 142 google_remote_engine); |
134 #else | 143 #else |
135 // TODO(janx): Implement a SpeechRecognizerImplAndroid with a JNI interface | 144 // TODO(janx): Implement a SpeechRecognizerImplAndroid with a JNI interface |
136 // forwarding calls to Android's platform speech recognition service (see | 145 // forwarding calls to Android's platform speech recognition service (see |
137 // crbug.com/222352). | 146 // crbug.com/222352). |
138 session.recognizer = NULL; | 147 session->recognizer = NULL; |
139 #endif | 148 #endif |
140 return session_id; | 149 return session_id; |
141 } | 150 } |
142 | 151 |
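For context, a hypothetical caller-side sketch of how a session produced by CreateSession() gets started. It only uses config fields that appear in this CL; request_context_getter, render_process_id, render_view_id and listener are assumed to be in scope, and the SpeechRecognitionManager::GetInstance() accessor is assumed to exist:

    SpeechRecognitionSessionConfig config;
    config.is_legacy_api = false;      // selects GoogleStreamingRemoteEngine above
    config.continuous = true;          // DCHECKed to be false for the legacy API
    config.url_request_context_getter = request_context_getter;
    config.initial_context.render_process_id = render_process_id;
    config.initial_context.render_view_id = render_view_id;
    config.event_listener = listener;  // receives the relayed recognition events

    SpeechRecognitionManager* manager = SpeechRecognitionManager::GetInstance();
    const int session_id = manager->CreateSession(config);
    manager->StartSession(session_id); // kicks off the permission flow below
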
143 void SpeechRecognitionManagerImpl::StartSession(int session_id) { | 152 void SpeechRecognitionManagerImpl::StartSession(int session_id) { |
144 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | 153 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); |
145 if (!SessionExists(session_id)) | 154 if (!SessionExists(session_id)) |
146 return; | 155 return; |
147 | 156 |
148 // If there is another active session, abort that. | 157 // If there is another active session, abort that. |
(...skipping 21 matching lines...) |
170 return; | 179 return; |
171 | 180 |
172 #if defined(OS_IOS) | 181 #if defined(OS_IOS) |
173 // On iOS, voice search can only be initiated by clear user action and thus | 182 // On iOS, voice search can only be initiated by clear user action and thus |
174 // it is always allowed. | 183 // it is always allowed. |
175 DCHECK(!ask_user && is_allowed); | 184 DCHECK(!ask_user && is_allowed); |
176 #else | 185 #else |
177 if (ask_user) { | 186 if (ask_user) { |
178 SessionsTable::iterator iter = sessions_.find(session_id); | 187 SessionsTable::iterator iter = sessions_.find(session_id); |
179 DCHECK(iter != sessions_.end()); | 188 DCHECK(iter != sessions_.end()); |
180 SpeechRecognitionSessionContext& context = iter->second.context; | 189 SpeechRecognitionSessionContext& context = iter->second->context; |
181 context.label = | 190 context.label = |
182 BrowserMainLoop::GetMediaStreamManager()->MakeMediaAccessRequest( | 191 BrowserMainLoop::GetMediaStreamManager()->MakeMediaAccessRequest( |
183 context.render_process_id, | 192 context.render_process_id, |
184 context.render_view_id, | 193 context.render_view_id, |
185 StreamOptions(MEDIA_DEVICE_AUDIO_CAPTURE, MEDIA_NO_SERVICE), | 194 StreamOptions(MEDIA_DEVICE_AUDIO_CAPTURE, MEDIA_NO_SERVICE), |
186 GURL(context.context_name), | 195 GURL(context.context_name), |
187 base::Bind( | 196 base::Bind( |
188 &SpeechRecognitionManagerImpl::MediaRequestPermissionCallback, | 197 &SpeechRecognitionManagerImpl::MediaRequestPermissionCallback, |
189 weak_factory_.GetWeakPtr())); | 198 weak_factory_.GetWeakPtr(), session_id)); |
190 | |
191 return; | 199 return; |
192 } | 200 } |
193 #endif // defined(OS_IOS) | 201 #endif // defined(OS_IOS) |
194 | 202 |
195 if (is_allowed) { | 203 if (is_allowed) { |
196 base::MessageLoop::current()->PostTask( | 204 base::MessageLoop::current()->PostTask( |
197 FROM_HERE, | 205 FROM_HERE, |
198 base::Bind(&SpeechRecognitionManagerImpl::DispatchEvent, | 206 base::Bind(&SpeechRecognitionManagerImpl::DispatchEvent, |
199 weak_factory_.GetWeakPtr(), | 207 weak_factory_.GetWeakPtr(), |
200 session_id, | 208 session_id, |
201 EVENT_START)); | 209 EVENT_START)); |
202 } else { | 210 } else { |
203 OnRecognitionError(session_id, SpeechRecognitionError( | 211 OnRecognitionError(session_id, SpeechRecognitionError( |
204 SPEECH_RECOGNITION_ERROR_NOT_ALLOWED)); | 212 SPEECH_RECOGNITION_ERROR_NOT_ALLOWED)); |
205 base::MessageLoop::current()->PostTask( | 213 base::MessageLoop::current()->PostTask( |
206 FROM_HERE, | 214 FROM_HERE, |
207 base::Bind(&SpeechRecognitionManagerImpl::DispatchEvent, | 215 base::Bind(&SpeechRecognitionManagerImpl::DispatchEvent, |
208 weak_factory_.GetWeakPtr(), | 216 weak_factory_.GetWeakPtr(), |
209 session_id, | 217 session_id, |
210 EVENT_ABORT)); | 218 EVENT_ABORT)); |
211 } | 219 } |
212 } | 220 } |
213 | 221 |
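Note that RecognitionAllowedCallback, like the rest of this file, never calls DispatchEvent() synchronously: it posts the call through a WeakPtr so the task is dropped if the manager is destroyed first. A standalone sketch of that pattern (the Dispatcher class is hypothetical; base::Bind, base::MessageLoop and base::WeakPtrFactory are the real APIs used above):

    #include "base/bind.h"
    #include "base/memory/weak_ptr.h"
    #include "base/message_loop/message_loop.h"

    class Dispatcher {
     public:
      Dispatcher() : weak_factory_(this) {}

      void PostEvent(int session_id) {
        base::MessageLoop::current()->PostTask(
            FROM_HERE,
            base::Bind(&Dispatcher::HandleEvent,
                       weak_factory_.GetWeakPtr(),  // task becomes a no-op if
                       session_id));                // |this| has been deleted
      }

     private:
      void HandleEvent(int session_id) { /* ... */ }

      // Keep last so outstanding weak pointers are invalidated before any
      // other member is torn down.
      base::WeakPtrFactory<Dispatcher> weak_factory_;
    };
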
214 void SpeechRecognitionManagerImpl::MediaRequestPermissionCallback( | 222 void SpeechRecognitionManagerImpl::MediaRequestPermissionCallback( |
215 const std::string& label, const MediaStreamDevices& devices) { | 223 int session_id, |
| 224 const MediaStreamDevices& devices, |
| 225 scoped_ptr<MediaStreamUIProxy> stream_ui) { |
216 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | 226 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); |
217 for (SessionsTable::iterator iter = sessions_.begin(); | |
218 iter != sessions_.end(); ++iter) { | |
219 if (iter->second.context.label == label) { | |
220 bool is_allowed = false; | |
221 if (!devices.empty()) { | |
222 // Copy the approved devices array to the context for UI indication. | |
223 iter->second.context.devices = devices; | |
224 is_allowed = true; | |
225 } | |
226 | 227 |
227 // Clear the label to indicate the request has been done. | 228 SessionsTable::iterator iter = sessions_.find(session_id); |
228 iter->second.context.label.clear(); | 229 if (iter == sessions_.end()) |
| 230 return; |
229 | 231 |
230 // Notify the recognition about the request result. | 232 bool is_allowed = !devices.empty(); |
231 RecognitionAllowedCallback(iter->first, false, is_allowed); | 233 if (is_allowed) { |
232 break; | 234 // Copy the approved devices array to the context for UI indication. |
233 } | 235 iter->second->context.devices = devices; |
| 236 |
| 237 // Save the UI object. |
| 238 iter->second->ui = stream_ui.Pass(); |
234 } | 239 } |
| 240 |
| 241 // Clear the label to indicate the request has been done. |
| 242 iter->second->context.label.clear(); |
| 243 |
| 244 // Notify the recognition about the request result. |
| 245 RecognitionAllowedCallback(iter->first, false, is_allowed); |
235 } | 246 } |
236 | 247 |
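The important change above is that StartSession() now binds the session id into the permission callback up front, which is what made the old label-based linear search unnecessary. A small sketch of that partial application; the base::Callback typedef shown is an assumption for illustration, not something this CL declares:

    // MediaRequestPermissionCallback(int session_id,
    //                                const MediaStreamDevices& devices,
    //                                scoped_ptr<MediaStreamUIProxy> stream_ui)
    //
    // Binding the weak pointer and |session_id| leaves a callable that only
    // needs the (devices, stream_ui) pair supplied later by the caller:
    base::Callback<void(const MediaStreamDevices&,
                        scoped_ptr<MediaStreamUIProxy>)> response =
        base::Bind(&SpeechRecognitionManagerImpl::MediaRequestPermissionCallback,
                   weak_factory_.GetWeakPtr(),
                   session_id);
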
237 void SpeechRecognitionManagerImpl::AbortSession(int session_id) { | 248 void SpeechRecognitionManagerImpl::AbortSession(int session_id) { |
238 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | 249 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); |
239 if (!SessionExists(session_id)) | 250 if (!SessionExists(session_id)) |
240 return; | 251 return; |
241 | 252 |
242 #if !defined(OS_IOS) | 253 SessionsTable::iterator iter = sessions_.find(session_id); |
243 const SpeechRecognitionSessionContext& context = | 254 iter->second->ui.reset(); |
244 GetSessionContext(session_id); | |
245 if (!context.label.empty()) | |
246 BrowserMainLoop::GetMediaStreamManager()->CancelRequest(context.label); | |
247 #endif // !defined(OS_IOS) | |
248 | 255 |
249 base::MessageLoop::current()->PostTask( | 256 base::MessageLoop::current()->PostTask( |
250 FROM_HERE, | 257 FROM_HERE, |
251 base::Bind(&SpeechRecognitionManagerImpl::DispatchEvent, | 258 base::Bind(&SpeechRecognitionManagerImpl::DispatchEvent, |
252 weak_factory_.GetWeakPtr(), | 259 weak_factory_.GetWeakPtr(), |
253 session_id, | 260 session_id, |
254 EVENT_ABORT)); | 261 EVENT_ABORT)); |
255 } | 262 } |
256 | 263 |
257 void SpeechRecognitionManagerImpl::StopAudioCaptureForSession(int session_id) { | 264 void SpeechRecognitionManagerImpl::StopAudioCaptureForSession(int session_id) { |
258 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | 265 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); |
259 if (!SessionExists(session_id)) | 266 if (!SessionExists(session_id)) |
260 return; | 267 return; |
261 | 268 |
262 #if !defined(OS_IOS) | 269 SessionsTable::iterator iter = sessions_.find(session_id); |
263 const SpeechRecognitionSessionContext& context = | 270 iter->second->ui.reset(); |
264 GetSessionContext(session_id); | |
265 if (!context.label.empty()) | |
266 BrowserMainLoop::GetMediaStreamManager()->CancelRequest(context.label); | |
267 #endif // !defined(OS_IOS) | |
268 | 271 |
269 base::MessageLoop::current()->PostTask( | 272 base::MessageLoop::current()->PostTask( |
270 FROM_HERE, | 273 FROM_HERE, |
271 base::Bind(&SpeechRecognitionManagerImpl::DispatchEvent, | 274 base::Bind(&SpeechRecognitionManagerImpl::DispatchEvent, |
272 weak_factory_.GetWeakPtr(), | 275 weak_factory_.GetWeakPtr(), |
273 session_id, | 276 session_id, |
274 EVENT_STOP_CAPTURE)); | 277 EVENT_STOP_CAPTURE)); |
275 } | 278 } |
276 | 279 |
277 // Here begins the SpeechRecognitionEventListener interface implementation, | 280 // Here begins the SpeechRecognitionEventListener interface implementation, |
278 // which will simply relay the events to the proper listener registered for the | 281 // which will simply relay the events to the proper listener registered for the |
279 // particular session (most likely InputTagSpeechDispatcherHost) and to the | 282 // particular session (most likely InputTagSpeechDispatcherHost) and to the |
280 // catch-all listener provided by the delegate (if any). | 283 // catch-all listener provided by the delegate (if any). |
281 | 284 |
282 void SpeechRecognitionManagerImpl::OnRecognitionStart(int session_id) { | 285 void SpeechRecognitionManagerImpl::OnRecognitionStart(int session_id) { |
283 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | 286 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); |
284 if (!SessionExists(session_id)) | 287 if (!SessionExists(session_id)) |
285 return; | 288 return; |
286 | 289 |
287 #if !defined(OS_IOS) | 290 SessionsTable::iterator iter = sessions_.find(session_id); |
288 const SpeechRecognitionSessionContext& context = | 291 if (iter->second->ui) { |
289 GetSessionContext(session_id); | 292 // Notify the UI that the devices are being used. |
290 if (!context.devices.empty()) { | 293 iter->second->ui->OnStarted(base::Closure()); |
291 // Notify the UI the devices are being used. | |
292 BrowserMainLoop::GetMediaStreamManager()->NotifyUIDevicesOpened( | |
293 context.label); | |
294 } | 294 } |
295 #endif // !defined(OS_IOS) | |
296 | 295 |
297 DCHECK_EQ(primary_session_id_, session_id); | 296 DCHECK_EQ(primary_session_id_, session_id); |
298 if (SpeechRecognitionEventListener* delegate_listener = GetDelegateListener()) | 297 if (SpeechRecognitionEventListener* delegate_listener = GetDelegateListener()) |
299 delegate_listener->OnRecognitionStart(session_id); | 298 delegate_listener->OnRecognitionStart(session_id); |
300 if (SpeechRecognitionEventListener* listener = GetListener(session_id)) | 299 if (SpeechRecognitionEventListener* listener = GetListener(session_id)) |
301 listener->OnRecognitionStart(session_id); | 300 listener->OnRecognitionStart(session_id); |
302 } | 301 } |
303 | 302 |
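OnStarted() above is given an empty base::Closure, i.e. no stop callback. Purely as a hypothetical sketch (assuming OnStarted()'s parameter really is a stop-request closure), a manager that wanted to react to the user stopping the stream from the UI could bind one in:

    // Hypothetical: abort the session if the stream's UI stop control is used.
    iter->second->ui->OnStarted(
        base::Bind(&SpeechRecognitionManagerImpl::AbortSession,
                   weak_factory_.GetWeakPtr(),
                   session_id));
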
304 void SpeechRecognitionManagerImpl::OnAudioStart(int session_id) { | 303 void SpeechRecognitionManagerImpl::OnAudioStart(int session_id) { |
305 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | 304 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); |
(...skipping 93 matching lines...) |
399 if (SpeechRecognitionEventListener* delegate_listener = GetDelegateListener()) | 398 if (SpeechRecognitionEventListener* delegate_listener = GetDelegateListener()) |
400 delegate_listener->OnAudioLevelsChange(session_id, volume, noise_volume); | 399 delegate_listener->OnAudioLevelsChange(session_id, volume, noise_volume); |
401 if (SpeechRecognitionEventListener* listener = GetListener(session_id)) | 400 if (SpeechRecognitionEventListener* listener = GetListener(session_id)) |
402 listener->OnAudioLevelsChange(session_id, volume, noise_volume); | 401 listener->OnAudioLevelsChange(session_id, volume, noise_volume); |
403 } | 402 } |
404 | 403 |
405 void SpeechRecognitionManagerImpl::OnRecognitionEnd(int session_id) { | 404 void SpeechRecognitionManagerImpl::OnRecognitionEnd(int session_id) { |
406 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | 405 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); |
407 if (!SessionExists(session_id)) | 406 if (!SessionExists(session_id)) |
408 return; | 407 return; |
409 #if !defined(OS_IOS) | |
410 const SpeechRecognitionSessionContext& context = | |
411 GetSessionContext(session_id); | |
412 if (!context.devices.empty()) { | |
413 // Notify the UI the devices has been closed. | |
414 BrowserMainLoop::GetMediaStreamManager()->NotifyUIDevicesClosed( | |
415 context.label); | |
416 } | |
417 #endif // !defined(OS_IOS) | |
418 | 408 |
419 if (SpeechRecognitionEventListener* delegate_listener = GetDelegateListener()) | 409 if (SpeechRecognitionEventListener* delegate_listener = GetDelegateListener()) |
420 delegate_listener->OnRecognitionEnd(session_id); | 410 delegate_listener->OnRecognitionEnd(session_id); |
421 if (SpeechRecognitionEventListener* listener = GetListener(session_id)) | 411 if (SpeechRecognitionEventListener* listener = GetListener(session_id)) |
422 listener->OnRecognitionEnd(session_id); | 412 listener->OnRecognitionEnd(session_id); |
423 base::MessageLoop::current()->PostTask( | 413 base::MessageLoop::current()->PostTask( |
424 FROM_HERE, | 414 FROM_HERE, |
425 base::Bind(&SpeechRecognitionManagerImpl::DispatchEvent, | 415 base::Bind(&SpeechRecognitionManagerImpl::DispatchEvent, |
426 weak_factory_.GetWeakPtr(), | 416 weak_factory_.GetWeakPtr(), |
427 session_id, | 417 session_id, |
428 EVENT_RECOGNITION_ENDED)); | 418 EVENT_RECOGNITION_ENDED)); |
429 } | 419 } |
430 | 420 |
431 int SpeechRecognitionManagerImpl::GetSession( | 421 int SpeechRecognitionManagerImpl::GetSession( |
432 int render_process_id, int render_view_id, int request_id) const { | 422 int render_process_id, int render_view_id, int request_id) const { |
433 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | 423 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); |
434 SessionsTable::const_iterator iter; | 424 SessionsTable::const_iterator iter; |
435 for(iter = sessions_.begin(); iter != sessions_.end(); ++iter) { | 425 for(iter = sessions_.begin(); iter != sessions_.end(); ++iter) { |
436 const int session_id = iter->first; | 426 const int session_id = iter->first; |
437 const SpeechRecognitionSessionContext& context = iter->second.context; | 427 const SpeechRecognitionSessionContext& context = iter->second->context; |
438 if (context.render_process_id == render_process_id && | 428 if (context.render_process_id == render_process_id && |
439 context.render_view_id == render_view_id && | 429 context.render_view_id == render_view_id && |
440 context.request_id == request_id) { | 430 context.request_id == request_id) { |
441 return session_id; | 431 return session_id; |
442 } | 432 } |
443 } | 433 } |
444 return kSessionIDInvalid; | 434 return kSessionIDInvalid; |
445 } | 435 } |
446 | 436 |
447 SpeechRecognitionSessionContext | 437 SpeechRecognitionSessionContext |
448 SpeechRecognitionManagerImpl::GetSessionContext(int session_id) const { | 438 SpeechRecognitionManagerImpl::GetSessionContext(int session_id) const { |
449 return GetSession(session_id).context; | 439 return GetSession(session_id)->context; |
450 } | 440 } |
451 | 441 |
452 void SpeechRecognitionManagerImpl::AbortAllSessionsForListener( | 442 void SpeechRecognitionManagerImpl::AbortAllSessionsForListener( |
453 SpeechRecognitionEventListener* listener) { | 443 SpeechRecognitionEventListener* listener) { |
454 // This method gracefully destroys sessions for the listener. However, since | 444 // This method gracefully destroys sessions for the listener. However, since |
455 // the listener itself is likely to be destroyed after this call, we avoid | 445 // the listener itself is likely to be destroyed after this call, we avoid |
456 // dispatching further events to it by clearing the |listener_is_active| flag. | 446 // dispatching further events to it by clearing the |listener_is_active| flag. |
457 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | 447 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); |
458 for (SessionsTable::iterator it = sessions_.begin(); it != sessions_.end(); | 448 for (SessionsTable::iterator it = sessions_.begin(); it != sessions_.end(); |
459 ++it) { | 449 ++it) { |
460 Session& session = it->second; | 450 Session* session = it->second; |
461 if (session.config.event_listener == listener) { | 451 if (session->config.event_listener == listener) { |
462 AbortSession(session.id); | 452 AbortSession(session->id); |
463 session.listener_is_active = false; | 453 session->listener_is_active = false; |
464 } | 454 } |
465 } | 455 } |
466 } | 456 } |
467 | 457 |
468 void SpeechRecognitionManagerImpl::AbortAllSessionsForRenderView( | 458 void SpeechRecognitionManagerImpl::AbortAllSessionsForRenderView( |
469 int render_process_id, | 459 int render_process_id, |
470 int render_view_id) { | 460 int render_view_id) { |
471 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | 461 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); |
472 for (SessionsTable::iterator it = sessions_.begin(); it != sessions_.end(); | 462 for (SessionsTable::iterator it = sessions_.begin(); it != sessions_.end(); |
473 ++it) { | 463 ++it) { |
474 Session& session = it->second; | 464 Session* session = it->second; |
475 if (session.context.render_process_id == render_process_id && | 465 if (session->context.render_process_id == render_process_id && |
476 session.context.render_view_id == render_view_id) { | 466 session->context.render_view_id == render_view_id) { |
477 AbortSession(session.id); | 467 AbortSession(session->id); |
478 } | 468 } |
479 } | 469 } |
480 } | 470 } |
481 | 471 |
482 // ----------------------- Core FSM implementation --------------------------- | 472 // ----------------------- Core FSM implementation --------------------------- |
483 void SpeechRecognitionManagerImpl::DispatchEvent(int session_id, | 473 void SpeechRecognitionManagerImpl::DispatchEvent(int session_id, |
484 FSMEvent event) { | 474 FSMEvent event) { |
485 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | 475 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); |
486 | 476 |
487 // There are some corner cases in which the session might be deleted (due to | 477 // There are some corner cases in which the session might be deleted (due to |
488 // an EndRecognition event) between a request (e.g. Abort) and its dispatch. | 478 // an EndRecognition event) between a request (e.g. Abort) and its dispatch. |
489 if (!SessionExists(session_id)) | 479 if (!SessionExists(session_id)) |
490 return; | 480 return; |
491 | 481 |
492 const Session& session = GetSession(session_id); | 482 Session* session = GetSession(session_id); |
493 FSMState session_state = GetSessionState(session_id); | 483 FSMState session_state = GetSessionState(session_id); |
494 DCHECK_LE(session_state, SESSION_STATE_MAX_VALUE); | 484 DCHECK_LE(session_state, SESSION_STATE_MAX_VALUE); |
495 DCHECK_LE(event, EVENT_MAX_VALUE); | 485 DCHECK_LE(event, EVENT_MAX_VALUE); |
496 | 486 |
497 // Event dispatching must be sequential, otherwise it will break all the rules | 487 // Event dispatching must be sequential, otherwise it will break all the rules |
498 // and the assumptions of the finite state automata model. | 488 // and the assumptions of the finite state automata model. |
499 DCHECK(!is_dispatching_event_); | 489 DCHECK(!is_dispatching_event_); |
500 is_dispatching_event_ = true; | 490 is_dispatching_event_ = true; |
501 ExecuteTransitionAndGetNextState(session, session_state, event); | 491 ExecuteTransitionAndGetNextState(session, session_state, event); |
502 is_dispatching_event_ = false; | 492 is_dispatching_event_ = false; |
503 } | 493 } |
504 | 494 |
505 // This FSM handles the evolution of each session, from the viewpoint of the | 495 // This FSM handles the evolution of each session, from the viewpoint of the |
506 // interaction with the user (that may be either the browser end-user, who | 496 // interaction with the user (that may be either the browser end-user, who |
507 // interacts with UI bubbles, or a JS developer interacting with JS methods). | 497 // interacts with UI bubbles, or a JS developer interacting with JS methods). |
508 // All the events received by the SpeechRecognizer instances (one for each | 498 // All the events received by the SpeechRecognizer instances (one for each |
509 // session) are always routed to the SpeechRecognitionEventListener(s) | 499 // session) are always routed to the SpeechRecognitionEventListener(s) |
510 // regardless of the choices taken in this FSM. | 500 // regardless of the choices taken in this FSM. |
511 void SpeechRecognitionManagerImpl::ExecuteTransitionAndGetNextState( | 501 void SpeechRecognitionManagerImpl::ExecuteTransitionAndGetNextState( |
512 const Session& session, FSMState session_state, FSMEvent event) { | 502 Session* session, FSMState session_state, FSMEvent event) { |
513 // Note: since we're not tracking the state of the recognizer object, rather | 503 // Note: since we're not tracking the state of the recognizer object, rather |
514 // we're directly retrieving it (through GetSessionState), we see its events | 504 // we're directly retrieving it (through GetSessionState), we see its events |
515 // (that are AUDIO_ENDED and RECOGNITION_ENDED) after its state evolution | 505 // (that are AUDIO_ENDED and RECOGNITION_ENDED) after its state evolution |
516 // (e.g., when we receive the AUDIO_ENDED event, the recognizer has just | 506 // (e.g., when we receive the AUDIO_ENDED event, the recognizer has just |
517 // completed the transition from CAPTURING_AUDIO to WAITING_FOR_RESULT, thus | 507 // completed the transition from CAPTURING_AUDIO to WAITING_FOR_RESULT, thus |
518 // we perceive the AUDIO_ENDED event in WAITING_FOR_RESULT). | 508 // we perceive the AUDIO_ENDED event in WAITING_FOR_RESULT). |
519 // This makes the code below a bit tricky but avoids a lot of code for | 509 // This makes the code below a bit tricky but avoids a lot of code for |
520 // tracking and reconstructing asynchronously the state of the recognizer. | 510 // tracking and reconstructing asynchronously the state of the recognizer. |
521 switch (session_state) { | 511 switch (session_state) { |
522 case SESSION_STATE_IDLE: | 512 case SESSION_STATE_IDLE: |
523 switch (event) { | 513 switch (event) { |
524 case EVENT_START: | 514 case EVENT_START: |
525 return SessionStart(session); | 515 return SessionStart(*session); |
526 case EVENT_ABORT: | 516 case EVENT_ABORT: |
527 return SessionAbort(session); | 517 return SessionAbort(*session); |
528 case EVENT_RECOGNITION_ENDED: | 518 case EVENT_RECOGNITION_ENDED: |
529 return SessionDelete(session); | 519 return SessionDelete(session); |
530 case EVENT_STOP_CAPTURE: | 520 case EVENT_STOP_CAPTURE: |
531 return SessionStopAudioCapture(session); | 521 return SessionStopAudioCapture(*session); |
532 case EVENT_AUDIO_ENDED: | 522 case EVENT_AUDIO_ENDED: |
533 return; | 523 return; |
534 } | 524 } |
535 break; | 525 break; |
536 case SESSION_STATE_CAPTURING_AUDIO: | 526 case SESSION_STATE_CAPTURING_AUDIO: |
537 switch (event) { | 527 switch (event) { |
538 case EVENT_STOP_CAPTURE: | 528 case EVENT_STOP_CAPTURE: |
539 return SessionStopAudioCapture(session); | 529 return SessionStopAudioCapture(*session); |
540 case EVENT_ABORT: | 530 case EVENT_ABORT: |
541 return SessionAbort(session); | 531 return SessionAbort(*session); |
542 case EVENT_START: | 532 case EVENT_START: |
543 return; | 533 return; |
544 case EVENT_AUDIO_ENDED: | 534 case EVENT_AUDIO_ENDED: |
545 case EVENT_RECOGNITION_ENDED: | 535 case EVENT_RECOGNITION_ENDED: |
546 return NotFeasible(session, event); | 536 return NotFeasible(*session, event); |
547 } | 537 } |
548 break; | 538 break; |
549 case SESSION_STATE_WAITING_FOR_RESULT: | 539 case SESSION_STATE_WAITING_FOR_RESULT: |
550 switch (event) { | 540 switch (event) { |
551 case EVENT_ABORT: | 541 case EVENT_ABORT: |
552 return SessionAbort(session); | 542 return SessionAbort(*session); |
553 case EVENT_AUDIO_ENDED: | 543 case EVENT_AUDIO_ENDED: |
554 return ResetCapturingSessionId(session); | 544 return ResetCapturingSessionId(*session); |
555 case EVENT_START: | 545 case EVENT_START: |
556 case EVENT_STOP_CAPTURE: | 546 case EVENT_STOP_CAPTURE: |
557 return; | 547 return; |
558 case EVENT_RECOGNITION_ENDED: | 548 case EVENT_RECOGNITION_ENDED: |
559 return NotFeasible(session, event); | 549 return NotFeasible(*session, event); |
560 } | 550 } |
561 break; | 551 break; |
562 } | 552 } |
563 return NotFeasible(session, event); | 553 return NotFeasible(*session, event); |
564 } | 554 } |
565 | 555 |
566 SpeechRecognitionManagerImpl::FSMState | 556 SpeechRecognitionManagerImpl::FSMState |
567 SpeechRecognitionManagerImpl::GetSessionState(int session_id) const { | 557 SpeechRecognitionManagerImpl::GetSessionState(int session_id) const { |
568 const Session& session = GetSession(session_id); | 558 Session* session = GetSession(session_id); |
569 if (!session.recognizer.get() || !session.recognizer->IsActive()) | 559 if (!session->recognizer.get() || !session->recognizer->IsActive()) |
570 return SESSION_STATE_IDLE; | 560 return SESSION_STATE_IDLE; |
571 if (session.recognizer->IsCapturingAudio()) | 561 if (session->recognizer->IsCapturingAudio()) |
572 return SESSION_STATE_CAPTURING_AUDIO; | 562 return SESSION_STATE_CAPTURING_AUDIO; |
573 return SESSION_STATE_WAITING_FOR_RESULT; | 563 return SESSION_STATE_WAITING_FOR_RESULT; |
574 } | 564 } |
575 | 565 |
576 // ----------- Contract for all the FSM evolution functions below ------------- | 566 // ----------- Contract for all the FSM evolution functions below ------------- |
577 // - Are guaranteed to be executed in the IO thread; | 567 // - Are guaranteed to be executed in the IO thread; |
578 // - Are guaranteed to be not reentrant (themselves and each other); | 568 // - Are guaranteed to be not reentrant (themselves and each other); |
579 | 569 |
580 void SpeechRecognitionManagerImpl::SessionStart(const Session& session) { | 570 void SpeechRecognitionManagerImpl::SessionStart(const Session& session) { |
581 DCHECK_EQ(primary_session_id_, session.id); | 571 DCHECK_EQ(primary_session_id_, session.id); |
(...skipping 12 matching lines...) Expand all Loading... |
594 DCHECK(session.recognizer.get()); | 584 DCHECK(session.recognizer.get()); |
595 session.recognizer->StopAudioCapture(); | 585 session.recognizer->StopAudioCapture(); |
596 } | 586 } |
597 | 587 |
598 void SpeechRecognitionManagerImpl::ResetCapturingSessionId( | 588 void SpeechRecognitionManagerImpl::ResetCapturingSessionId( |
599 const Session& session) { | 589 const Session& session) { |
600 DCHECK_EQ(primary_session_id_, session.id); | 590 DCHECK_EQ(primary_session_id_, session.id); |
601 primary_session_id_ = kSessionIDInvalid; | 591 primary_session_id_ = kSessionIDInvalid; |
602 } | 592 } |
603 | 593 |
604 void SpeechRecognitionManagerImpl::SessionDelete(const Session& session) { | 594 void SpeechRecognitionManagerImpl::SessionDelete(Session* session) { |
605 DCHECK(session.recognizer.get() == NULL || !session.recognizer->IsActive()); | 595 DCHECK(session->recognizer == NULL || !session->recognizer->IsActive()); |
606 if (primary_session_id_ == session.id) | 596 if (primary_session_id_ == session->id) |
607 primary_session_id_ = kSessionIDInvalid; | 597 primary_session_id_ = kSessionIDInvalid; |
608 sessions_.erase(session.id); | 598 sessions_.erase(session->id); |
| 599 delete session; |
609 } | 600 } |
610 | 601 |
611 void SpeechRecognitionManagerImpl::NotFeasible(const Session& session, | 602 void SpeechRecognitionManagerImpl::NotFeasible(const Session& session, |
612 FSMEvent event) { | 603 FSMEvent event) { |
613 NOTREACHED() << "Unfeasible event " << event | 604 NOTREACHED() << "Unfeasible event " << event |
614 << " in state " << GetSessionState(session.id) | 605 << " in state " << GetSessionState(session.id) |
615 << " for session " << session.id; | 606 << " for session " << session.id; |
616 } | 607 } |
617 | 608 |
618 int SpeechRecognitionManagerImpl::GetNextSessionID() { | 609 int SpeechRecognitionManagerImpl::GetNextSessionID() { |
619 ++last_session_id_; | 610 ++last_session_id_; |
620 // Deal with wrapping of last_session_id_. (How civilized). | 611 // Deal with wrapping of last_session_id_. (How civilized). |
621 if (last_session_id_ <= 0) | 612 if (last_session_id_ <= 0) |
622 last_session_id_ = 1; | 613 last_session_id_ = 1; |
623 return last_session_id_; | 614 return last_session_id_; |
624 } | 615 } |
625 | 616 |
626 bool SpeechRecognitionManagerImpl::SessionExists(int session_id) const { | 617 bool SpeechRecognitionManagerImpl::SessionExists(int session_id) const { |
627 return sessions_.find(session_id) != sessions_.end(); | 618 return sessions_.find(session_id) != sessions_.end(); |
628 } | 619 } |
629 | 620 |
630 const SpeechRecognitionManagerImpl::Session& | 621 SpeechRecognitionManagerImpl::Session* |
631 SpeechRecognitionManagerImpl::GetSession(int session_id) const { | 622 SpeechRecognitionManagerImpl::GetSession(int session_id) const { |
632 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | 623 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); |
633 SessionsTable::const_iterator iter = sessions_.find(session_id); | 624 SessionsTable::const_iterator iter = sessions_.find(session_id); |
634 DCHECK(iter != sessions_.end()); | 625 DCHECK(iter != sessions_.end()); |
635 return iter->second; | 626 return iter->second; |
636 } | 627 } |
637 | 628 |
638 SpeechRecognitionEventListener* SpeechRecognitionManagerImpl::GetListener( | 629 SpeechRecognitionEventListener* SpeechRecognitionManagerImpl::GetListener( |
639 int session_id) const { | 630 int session_id) const { |
640 const Session& session = GetSession(session_id); | 631 Session* session = GetSession(session_id); |
641 return session.listener_is_active ? session.config.event_listener : NULL; | 632 return session->listener_is_active ? session->config.event_listener : NULL; |
642 } | 633 } |
643 | 634 |
644 SpeechRecognitionEventListener* | 635 SpeechRecognitionEventListener* |
645 SpeechRecognitionManagerImpl::GetDelegateListener() const { | 636 SpeechRecognitionManagerImpl::GetDelegateListener() const { |
646 return delegate_.get() ? delegate_->GetEventListener() : NULL; | 637 return delegate_.get() ? delegate_->GetEventListener() : NULL; |
647 } | 638 } |
648 | 639 |
649 const SpeechRecognitionSessionConfig& | 640 const SpeechRecognitionSessionConfig& |
650 SpeechRecognitionManagerImpl::GetSessionConfig(int session_id) const { | 641 SpeechRecognitionManagerImpl::GetSessionConfig(int session_id) const { |
651 return GetSession(session_id).config; | 642 return GetSession(session_id)->config; |
652 } | 643 } |
653 | 644 |
654 bool SpeechRecognitionManagerImpl::HasAudioInputDevices() { | 645 bool SpeechRecognitionManagerImpl::HasAudioInputDevices() { |
655 return BrowserMainLoop::GetAudioManager()->HasAudioInputDevices(); | 646 return BrowserMainLoop::GetAudioManager()->HasAudioInputDevices(); |
656 } | 647 } |
657 | 648 |
658 string16 SpeechRecognitionManagerImpl::GetAudioInputDeviceModel() { | 649 string16 SpeechRecognitionManagerImpl::GetAudioInputDeviceModel() { |
659 return BrowserMainLoop::GetAudioManager()->GetAudioInputDeviceModel(); | 650 return BrowserMainLoop::GetAudioManager()->GetAudioInputDeviceModel(); |
660 } | 651 } |
661 | 652 |
662 void SpeechRecognitionManagerImpl::ShowAudioInputSettings() { | 653 void SpeechRecognitionManagerImpl::ShowAudioInputSettings() { |
663 // Since AudioManager::ShowAudioInputSettings can potentially launch external | 654 // Since AudioManager::ShowAudioInputSettings can potentially launch external |
664 // processes, do that in the FILE thread to not block the calling threads. | 655 // processes, do that in the FILE thread to not block the calling threads. |
665 BrowserThread::PostTask(BrowserThread::FILE, FROM_HERE, | 656 BrowserThread::PostTask(BrowserThread::FILE, FROM_HERE, |
666 base::Bind(&ShowAudioInputSettingsOnFileThread)); | 657 base::Bind(&ShowAudioInputSettingsOnFileThread)); |
667 } | 658 } |
668 | 659 |
669 SpeechRecognitionManagerImpl::Session::Session() | 660 SpeechRecognitionManagerImpl::Session::Session() |
670 : id(kSessionIDInvalid), | 661 : id(kSessionIDInvalid), |
671 listener_is_active(true) { | 662 listener_is_active(true) { |
672 } | 663 } |
673 | 664 |
674 SpeechRecognitionManagerImpl::Session::~Session() { | 665 SpeechRecognitionManagerImpl::Session::~Session() { |
675 } | 666 } |
676 | 667 |
677 } // namespace content | 668 } // namespace content |