OLD | NEW |
---|---|
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "content/browser/speech/speech_recognition_manager_impl.h" | 5 #include "content/browser/speech/speech_recognition_manager_impl.h" |
6 | 6 |
7 #include "base/bind.h" | 7 #include "base/bind.h" |
8 #include "base/memory/singleton.h" | |
8 #include "content/browser/browser_main_loop.h" | 9 #include "content/browser/browser_main_loop.h" |
9 #include "content/browser/renderer_host/render_view_host_impl.h" | 10 #include "content/browser/speech/google_one_shot_remote_engine.h" |
10 #include "content/browser/speech/input_tag_speech_dispatcher_host.h" | 11 #include "content/browser/speech/speech_recognition_engine.h" |
12 #include "content/browser/speech/speech_recognizer_impl.h" | |
11 #include "content/public/browser/browser_thread.h" | 13 #include "content/public/browser/browser_thread.h" |
12 #include "content/public/browser/content_browser_client.h" | 14 #include "content/public/browser/content_browser_client.h" |
13 #include "content/public/browser/speech_recognizer.h" | |
14 #include "content/public/browser/render_view_host_delegate.h" | |
15 #include "content/public/browser/resource_context.h" | 15 #include "content/public/browser/resource_context.h" |
16 #include "content/public/browser/speech_recognition_event_listener.h" | |
16 #include "content/public/browser/speech_recognition_manager_delegate.h" | 17 #include "content/public/browser/speech_recognition_manager_delegate.h" |
17 #include "content/public/browser/speech_recognition_preferences.h" | 18 #include "content/public/browser/speech_recognition_session_config.h" |
18 #include "content/public/common/view_type.h" | 19 #include "content/public/browser/speech_recognition_session_context.h" |
20 #include "content/public/common/speech_recognition_result.h" | |
19 #include "media/audio/audio_manager.h" | 21 #include "media/audio/audio_manager.h" |
20 | 22 |
23 using base::Callback; | |
24 using base::Unretained; | |
21 using content::BrowserMainLoop; | 25 using content::BrowserMainLoop; |
22 using content::BrowserThread; | 26 using content::BrowserThread; |
23 using content::RenderViewHostImpl; | 27 using content::SpeechRecognitionError; |
28 using content::SpeechRecognitionEventListener; | |
24 using content::SpeechRecognitionManager; | 29 using content::SpeechRecognitionManager; |
25 using content::SpeechRecognitionManagerDelegate; | 30 using content::SpeechRecognitionResult; |
31 using content::SpeechRecognitionSessionContext; | |
32 using content::SpeechRecognitionSessionConfig; | |
33 using media::AudioManager; | |
Satish
2012/04/19 13:03:19
same as below
Primiano Tucci (use gerrit)
2012/04/20 16:06:43
Done.
| |
34 using std::string; | |
Satish
2012/04/19 13:03:19
could remove this as string is used only in 1 plac
Primiano Tucci (use gerrit)
2012/04/20 16:06:43
Done.
| |
35 | |
36 namespace { | |
37 | |
38 // A dummy implementation of the SpeechRecognitionManagerDelegate interface | |
Satish
2012/04/19 13:03:19
no indentation required within namespaces
Primiano Tucci (use gerrit)
2012/04/20 16:06:43
Done.
| |
39 // used when no delegate has been passed to the SpeechRecognitionManagerImpl. | |
40 class VoidRecognitionManagerDelegate : | |
41 public content::SpeechRecognitionManagerDelegate { | |
42 public: | |
43 static VoidRecognitionManagerDelegate* GetInstance() { | |
44 return Singleton<VoidRecognitionManagerDelegate>::get(); | |
45 } | |
46 virtual void GetDiagnosticInformation( | |
47 bool* can_report_metrics, std::string* request_info) OVERRIDE {} | |
48 virtual bool CheckRecognitionIsAllowed(int session_id) OVERRIDE { | |
49 return false; } | |
50 virtual void ShowRecognitionRequested(int session_id) OVERRIDE {} | |
51 virtual void ShowWarmUp(int session_id) OVERRIDE {} | |
52 virtual void ShowRecognizing(int session_id) OVERRIDE {} | |
53 virtual void ShowRecording(int session_id) OVERRIDE {} | |
54 virtual void ShowInputVolume( | |
55 int session_id, float volume, float noise_volume) OVERRIDE {} | |
56 virtual void ShowError(int session_id, | |
57 const content::SpeechRecognitionError& error) OVERRIDE {} | |
58 virtual void DoClose(int session_id) OVERRIDE {} | |
59 | |
60 private: | |
61 VoidRecognitionManagerDelegate() {} | |
62 virtual ~VoidRecognitionManagerDelegate() {} | |
63 friend struct DefaultSingletonTraits<VoidRecognitionManagerDelegate>; | |
64 }; | |
65 } //namespace | |
Satish
2012/04/19 13:03:19
add 2 spaces before // and 1 space after //
Primiano Tucci (use gerrit)
2012/04/20 16:06:43
Done.
| |
66 | |
67 namespace content { | |
68 const int SpeechRecognitionManager::kSessionIDInvalid = 0; | |
26 | 69 |
27 SpeechRecognitionManager* SpeechRecognitionManager::GetInstance() { | 70 SpeechRecognitionManager* SpeechRecognitionManager::GetInstance() { |
28 return speech::SpeechRecognitionManagerImpl::GetInstance(); | 71 return speech::SpeechRecognitionManagerImpl::GetInstance(); |
29 } | 72 } |
73 } // namespace content | |
30 | 74 |
31 namespace speech { | 75 namespace speech { |
32 | 76 |
33 struct SpeechRecognitionManagerImpl::SpeechRecognitionParams { | |
34 SpeechRecognitionParams( | |
35 InputTagSpeechDispatcherHost* delegate, | |
36 int session_id, | |
37 int render_process_id, | |
38 int render_view_id, | |
39 const gfx::Rect& element_rect, | |
40 const std::string& language, | |
41 const std::string& grammar, | |
42 const std::string& origin_url, | |
43 net::URLRequestContextGetter* context_getter, | |
44 content::SpeechRecognitionPreferences* recognition_prefs) | |
45 : delegate(delegate), | |
46 session_id(session_id), | |
47 render_process_id(render_process_id), | |
48 render_view_id(render_view_id), | |
49 element_rect(element_rect), | |
50 language(language), | |
51 grammar(grammar), | |
52 origin_url(origin_url), | |
53 context_getter(context_getter), | |
54 recognition_prefs(recognition_prefs) { | |
55 } | |
56 | |
57 InputTagSpeechDispatcherHost* delegate; | |
58 int session_id; | |
59 int render_process_id; | |
60 int render_view_id; | |
61 gfx::Rect element_rect; | |
62 std::string language; | |
63 std::string grammar; | |
64 std::string origin_url; | |
65 net::URLRequestContextGetter* context_getter; | |
66 content::SpeechRecognitionPreferences* recognition_prefs; | |
67 }; | |
68 | |
69 SpeechRecognitionManagerImpl* SpeechRecognitionManagerImpl::GetInstance() { | 77 SpeechRecognitionManagerImpl* SpeechRecognitionManagerImpl::GetInstance() { |
70 return Singleton<SpeechRecognitionManagerImpl>::get(); | 78 return Singleton<SpeechRecognitionManagerImpl>::get(); |
71 } | 79 } |
72 | 80 |
73 SpeechRecognitionManagerImpl::SpeechRecognitionManagerImpl() | 81 SpeechRecognitionManagerImpl::SpeechRecognitionManagerImpl() |
74 : can_report_metrics_(false), | 82 : interactive_session_id_(0), |
75 recording_session_id_(0) { | 83 last_session_id_(0), |
76 delegate_.reset(content::GetContentClient()->browser()-> | 84 is_dispatching_event_(false) { |
77 GetSpeechRecognitionManagerDelegate()); | 85 delegate_ = content::GetContentClient()->browser()-> |
86 GetSpeechRecognitionManagerDelegate(); | |
87 // If none is provided, instantiate a void delegate so we can avoid | |
88 // unaesthetic "if (delegate_ != NULL)" statements. | |
89 if (delegate_ == NULL) | |
Satish
2012/04/19 13:03:19
use 'if (!delegate_)'
Primiano Tucci (use gerrit)
2012/04/20 16:06:43
Hmm I'd prefer the == / != NULL form since:
1. It
| |
90 delegate_ = VoidRecognitionManagerDelegate::GetInstance(); | |
78 } | 91 } |
79 | 92 |
80 SpeechRecognitionManagerImpl::~SpeechRecognitionManagerImpl() { | 93 SpeechRecognitionManagerImpl::~SpeechRecognitionManagerImpl() { |
81 while (requests_.begin() != requests_.end()) | 94 // Recognition sessions will be aborted by the corresponding destructors. |
Satish
2012/04/19 13:03:19
I see the destructor of Session not explicitly abo
Primiano Tucci (use gerrit)
2012/04/20 16:06:43
Yes, I meant, by destructor calls of the sessions_
Satish
2012/04/23 10:25:17
That is a bit worrying because the session could b
| |
82 CancelRecognition(requests_.begin()->first); | 95 sessions_.clear(); |
83 } | 96 } |
97 | |
98 int SpeechRecognitionManagerImpl::CreateSession( | |
99 SpeechRecognitionSessionConfig& config, | |
Satish
2012/04/19 13:03:19
can this be changed to a const reference?
Primiano Tucci (use gerrit)
2012/04/20 16:06:43
Done.
| |
100 SpeechRecognitionEventListener* event_listener) { | |
101 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | |
102 | |
103 const int session_id = GetNextSessionID(); | |
104 DCHECK(!SessionExists(session_id)); | |
105 // Set-up the new session. | |
106 Session& session = sessions_[session_id]; | |
107 session.id = session_id; | |
108 session.event_listener = event_listener; | |
109 session.context = config.initial_context; | |
110 | |
111 // TODO(primiano) Is this check enough just on creation or shall we move/copy | |
112 // it on SessionStart in order to repeat the check every time? | |
113 if (!delegate_->CheckRecognitionIsAllowed(session_id)) { | |
114 sessions_.erase(session_id); | |
115 return kSessionIDInvalid; | |
116 } | |
117 | |
118 string hardware_info; | |
119 bool can_report_metrics; | |
120 delegate_->GetDiagnosticInformation(&can_report_metrics, &hardware_info); | |
121 | |
122 GoogleOneShotRemoteEngineConfig remote_engine_config; | |
123 remote_engine_config.language = config.language; | |
124 remote_engine_config.grammar = config.grammar; | |
125 remote_engine_config.audio_sample_rate = | |
126 SpeechRecognizerImpl::kAudioSampleRate; | |
127 remote_engine_config.audio_num_bits_per_sample = | |
128 SpeechRecognizerImpl::kNumBitsPerAudioSample; | |
129 remote_engine_config.filter_profanities = config.filter_profanities; | |
130 remote_engine_config.hardware_info = hardware_info; | |
131 remote_engine_config.origin_url = can_report_metrics ? config.origin_url : ""; | |
132 | |
133 GoogleOneShotRemoteEngine* google_remote_engine = | |
134 new GoogleOneShotRemoteEngine(config.url_context_getter); | |
135 google_remote_engine->SetConfig(remote_engine_config); | |
136 | |
137 session.recognizer = new SpeechRecognizerImpl(this, | |
138 session_id, | |
139 google_remote_engine); | |
140 return session_id; | |
141 } | |
142 | |
143 void SpeechRecognitionManagerImpl::StartSession(int session_id) { | |
144 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | |
145 DCHECK(SessionExists(session_id)); | |
146 | |
147 // If there is another interactive session, detach prior to start the new one. | |
148 if (interactive_session_id_ > 0 && interactive_session_id_ != session_id) { | |
Satish
2012/04/19 13:03:19
could remove braces
Satish
2012/04/19 13:03:19
> 0 should be changed to != kSessionIDInvalid here
Primiano Tucci (use gerrit)
2012/04/20 16:06:43
Done.
Primiano Tucci (use gerrit)
2012/04/20 16:06:43
Oops, right!
| |
149 DetachSession(interactive_session_id_); | |
150 } | |
151 | |
152 BrowserThread::PostTask(BrowserThread::IO, FROM_HERE, | |
153 base::Bind(&SpeechRecognitionManagerImpl::DispatchEvent, Unretained(this), | |
154 session_id, FSMEventArgs(EVENT_START))); | |
155 } | |
156 | |
157 void SpeechRecognitionManagerImpl::AbortSession(int session_id) { | |
158 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | |
159 DCHECK(SessionExists(session_id)); | |
160 | |
161 BrowserThread::PostTask(BrowserThread::IO, FROM_HERE, | |
162 base::Bind(&SpeechRecognitionManagerImpl::DispatchEvent, Unretained(this), | |
163 session_id, FSMEventArgs(EVENT_ABORT))); | |
164 } | |
165 | |
166 void SpeechRecognitionManagerImpl::StopAudioCaptureForSession(int session_id) { | |
167 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | |
168 DCHECK(SessionExists(session_id)); | |
169 | |
170 BrowserThread::PostTask(BrowserThread::IO, FROM_HERE, | |
171 base::Bind(&SpeechRecognitionManagerImpl::DispatchEvent, Unretained(this), | |
172 session_id, FSMEventArgs(EVENT_STOP_CAPTURE))); | |
173 } | |
174 | |
175 void SpeechRecognitionManagerImpl::DetachSession(int session_id) { | |
176 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | |
177 DCHECK(SessionExists(session_id)); | |
178 | |
179 BrowserThread::PostTask(BrowserThread::IO, FROM_HERE, | |
180 base::Bind(&SpeechRecognitionManagerImpl::DispatchEvent, Unretained(this), | |
181 session_id, FSMEventArgs(EVENT_DETACH))); | |
182 } | |
183 | |
184 // Here begins the SpeechRecognitionEventListener interface implementation, | |
185 // which will simply relay the events to the proper listener registered for the | |
186 // particular session (most likely InputTagSpeechDispatcherHost) and intercept | |
187 // some of them to provide UI notifications. | |
188 | |
189 void SpeechRecognitionManagerImpl::OnRecognitionStart(int session_id) { | |
190 if (!SessionExists(session_id)) | |
191 return; | |
192 DCHECK_EQ(interactive_session_id_, session_id); | |
193 delegate_->ShowWarmUp(session_id); | |
194 GetListener(session_id)->OnRecognitionStart(session_id); | |
195 } | |
196 | |
197 void SpeechRecognitionManagerImpl::OnAudioStart(int session_id) { | |
198 if (!SessionExists(session_id)) | |
199 return; | |
200 DCHECK_EQ(interactive_session_id_, session_id); | |
201 delegate_->ShowRecording(session_id); | |
202 GetListener(session_id)->OnAudioStart(session_id); | |
203 } | |
204 | |
205 void SpeechRecognitionManagerImpl::OnEnvironmentEstimationComplete( | |
206 int session_id) { | |
207 if (!SessionExists(session_id)) | |
208 return; | |
209 DCHECK_EQ(interactive_session_id_, session_id); | |
210 GetListener(session_id)->OnEnvironmentEstimationComplete(session_id); | |
211 } | |
212 | |
213 void SpeechRecognitionManagerImpl::OnSoundStart(int session_id) { | |
214 if (!SessionExists(session_id)) | |
215 return; | |
216 DCHECK_EQ(interactive_session_id_, session_id); | |
217 GetListener(session_id)->OnSoundStart(session_id); | |
218 } | |
219 | |
220 void SpeechRecognitionManagerImpl::OnSoundEnd(int session_id) { | |
221 if (!SessionExists(session_id)) | |
222 return; | |
223 GetListener(session_id)->OnSoundEnd(session_id); | |
224 } | |
225 | |
226 void SpeechRecognitionManagerImpl::OnAudioEnd(int session_id) { | |
227 if (!SessionExists(session_id)) | |
228 return; | |
229 | |
230 // OnAudioEnd can also be raised after an abort request, when the session is | |
231 // not interactive anymore. | |
232 if (interactive_session_id_ == session_id) | |
233 delegate_->ShowRecognizing(session_id); | |
234 | |
235 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | |
Satish
2012/04/19 13:03:19
could move this check to the very beginning (same
Primiano Tucci (use gerrit)
2012/04/20 16:06:43
Right. Added in all public methods.
| |
236 GetListener(session_id)->OnAudioEnd(session_id); | |
237 DispatchEvent(session_id, FSMEventArgs(EVENT_AUDIO_ENDED)); | |
238 } | |
239 | |
240 void SpeechRecognitionManagerImpl::OnRecognitionResult( | |
241 int session_id, const content::SpeechRecognitionResult& result) { | |
242 if (!SessionExists(session_id)) | |
243 return; | |
244 | |
245 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | |
246 GetListener(session_id)->OnRecognitionResult(session_id, result); | |
247 FSMEventArgs event_args(EVENT_RECOGNITION_RESULT); | |
248 event_args.speech_result = &result; | |
Satish
2012/04/19 13:03:19
can this be passed by value to avoid any use-after
Primiano Tucci (use gerrit)
2012/04/20 16:06:43
Removed. Was not used at all.
| |
249 DispatchEvent(session_id, event_args); | |
250 } | |
251 | |
252 void SpeechRecognitionManagerImpl::OnRecognitionError( | |
253 int session_id, const content::SpeechRecognitionError& error) { | |
254 if (!SessionExists(session_id)) | |
255 return; | |
256 | |
257 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | |
258 GetListener(session_id)->OnRecognitionError(session_id, error); | |
259 FSMEventArgs event_args(EVENT_RECOGNITION_ERROR); | |
260 event_args.speech_error = &error; | |
Satish
2012/04/19 13:03:19
ditto
Primiano Tucci (use gerrit)
2012/04/20 16:06:43
Done.
| |
261 DispatchEvent(session_id, event_args); | |
262 } | |
263 | |
264 void SpeechRecognitionManagerImpl::OnAudioLevelsChange( | |
265 int session_id, float volume, float noise_volume) { | |
266 if (!SessionExists(session_id)) | |
267 return; | |
268 delegate_->ShowInputVolume(session_id, volume, noise_volume); | |
269 GetListener(session_id)->OnAudioLevelsChange(session_id, volume, | |
270 noise_volume); | |
271 } | |
272 | |
273 void SpeechRecognitionManagerImpl::OnRecognitionEnd(int session_id) { | |
274 if (!SessionExists(session_id)) | |
275 return; | |
276 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | |
277 GetListener(session_id)->OnRecognitionEnd(session_id); | |
278 DispatchEvent(session_id, FSMEventArgs(EVENT_RECOGNITION_ENDED)); | |
279 } | |
280 | |
281 int SpeechRecognitionManagerImpl::LookupSessionByContext( | |
Satish
2012/04/19 13:03:19
Since this class is the broker for SessionContext
Primiano Tucci (use gerrit)
2012/04/20 16:06:43
Can we keep this as a TODO and do it after CL2 and
Satish
2012/04/23 10:25:17
I'd prefer we make this simpler now and change it
| |
282 Callback<bool(const SpeechRecognitionSessionContext&)> matcher) const { | |
Satish
2012/04/19 13:03:19
indent by 4 spaces only
Primiano Tucci (use gerrit)
2012/04/20 16:06:43
Done.
| |
283 SessionsTable::const_iterator iter; | |
284 // Note: the callback (matcher) must NEVER perform non-const calls on us. | |
285 for(iter = sessions_.begin(); iter != sessions_.end(); iter++) { | |
286 const int session_id = iter->first; | |
287 const Session& session = iter->second; | |
288 bool matches = matcher.Run(session.context); | |
289 if (matches) | |
290 return session_id; | |
291 } | |
292 return 0; | |
293 } | |
294 | |
295 // TODO(primiano) are we really sure that iterator->second will always give us | |
Satish
2012/04/19 13:03:19
I've seen more code rely on that behavior so could
Primiano Tucci (use gerrit)
2012/04/20 16:06:43
Done.
| |
296 // a reference to the inner contained object, and not a copy? Couldn't find any | |
297 // strong indication in the STL doc. | |
298 SpeechRecognitionSessionContext& | |
Satish
2012/04/19 13:03:19
could return this by value or if you prefer pass i
Primiano Tucci (use gerrit)
2012/04/20 16:06:43
Done.
| |
299 SpeechRecognitionManagerImpl::GetSessionContext(int session_id) const { | |
300 SessionsTable::const_iterator iter = sessions_.find(session_id); | |
301 DCHECK(iter != sessions_.end()); | |
302 return const_cast<SpeechRecognitionSessionContext&>(iter->second.context); | |
303 } | |
304 | |
305 void SpeechRecognitionManagerImpl::AbortAllSessionsForListener( | |
306 SpeechRecognitionEventListener* listener) { | |
307 SessionsTable::iterator it = sessions_.begin(); | |
308 // AbortSession is asynchronous and the session will not be removed from the | |
Satish
2012/04/19 13:03:19
asyncrhonous -> asynchronous
Primiano Tucci (use gerrit)
2012/04/20 16:06:43
Done.
| |
309 // collection while we are iterating over it. | |
310 while (it != sessions_.end()) { | |
311 if (it->second.event_listener == listener) | |
312 AbortSession(it->first); | |
313 ++it; | |
314 } | |
315 } | |
316 | |
317 // ----------------------- Core FSM implementation --------------------------- | |
318 void SpeechRecognitionManagerImpl::DispatchEvent(int session_id, | |
319 FSMEventArgs event_args) { | |
320 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); | |
321 if (!SessionExists(session_id)) | |
322 return; | |
323 | |
324 Session& session = sessions_[session_id]; | |
325 DCHECK_LE(session.state, STATE_MAX_VALUE); | |
326 DCHECK_LE(event_args.event, EVENT_MAX_VALUE); | |
327 | |
328 // Event dispatching must be sequential, otherwise it will break all the rules | |
329 // and the assumptions of the finite state automata model. | |
330 DCHECK(!is_dispatching_event_); | |
331 is_dispatching_event_ = true; | |
332 | |
333 // Pedantic consistency checks. | |
334 if (session.state == STATE_INTERACTIVE || | |
335 session.state == STATE_DETACHABLE || | |
336 session.state == STATE_ENDED_WITH_ERROR) | |
Satish
2012/04/19 13:03:19
suggest braces for multiline if statements like th
Primiano Tucci (use gerrit)
2012/04/20 16:06:43
Done.
| |
337 DCHECK_EQ(interactive_session_id_, session_id); | |
338 if (session.state == STATE_IDLE || | |
339 session.state == STATE_DETACHED || | |
340 session.state == STATE_ABORTING) | |
341 DCHECK_NE(interactive_session_id_, session_id); | |
342 | |
343 session.state = ExecuteTransitionAndGetNextState(session, event_args); | |
344 | |
345 is_dispatching_event_ = false; | |
346 } | |
347 | |
348 // This FSM handles the evolution of each session only from the UI viewpoint, | |
349 // coordinating the UI bubbles and the user input actions. | |
350 // All the events received by the SpeechRecognizerImpl instances (one for each | |
351 // session) are always routed to the SpeechRecognitionEventListener(s) | |
352 // regardless of the choices taken in this FSM. | |
353 SpeechRecognitionManagerImpl::FSMState | |
354 SpeechRecognitionManagerImpl::ExecuteTransitionAndGetNextState( | |
355 Session& session, const FSMEventArgs& event_args) { | |
356 const FSMEvent event = event_args.event; | |
357 switch (session.state) { | |
358 case STATE_IDLE: | |
359 switch (event) { | |
360 case EVENT_ABORT: | |
361 return SessionDelete(session, event_args); | |
362 case EVENT_START: | |
363 return SessionStart(session, event_args); | |
364 case EVENT_STOP_CAPTURE: | |
365 case EVENT_DETACH: | |
366 return DoNothing(session, event_args); | |
367 case EVENT_AUDIO_ENDED: | |
368 case EVENT_RECOGNITION_ENDED: | |
369 case EVENT_RECOGNITION_RESULT: | |
370 case EVENT_RECOGNITION_ERROR: | |
371 return NotFeasible(session, event_args); | |
372 } | |
373 break; | |
374 case STATE_INTERACTIVE: | |
375 switch (event) { | |
376 case EVENT_ABORT: | |
377 return SessionAbort(session, event_args); | |
378 case EVENT_START: | |
379 return DoNothing(session, event_args); | |
Satish
2012/04/19 13:03:19
should be calling NotFeasible instead of DoNothing
Primiano Tucci (use gerrit)
2012/04/20 16:06:43
This code is changed. Btw the idea is that in this
| |
380 case EVENT_STOP_CAPTURE: | |
381 return SessionStopAudioCapture(session, event_args); | |
382 case EVENT_DETACH: | |
383 return SessionAbort(session, event_args); | |
384 case EVENT_AUDIO_ENDED: | |
385 return SessionSetDetachable(session, event_args); | |
386 case EVENT_RECOGNITION_ENDED: | |
387 case EVENT_RECOGNITION_RESULT: | |
388 // TODO(primiano) Valid only in single shot mode. | |
389 return NotFeasible(session, event_args); | |
390 case EVENT_RECOGNITION_ERROR: | |
391 return SessionReportError(session, event_args); | |
392 } | |
393 break; | |
394 case STATE_DETACHABLE: | |
Satish
2012/04/19 13:03:19
it would be clearer to understand if 'detachable'
Primiano Tucci (use gerrit)
2012/04/20 16:06:43
Simplified the FSM. Now it should be clearer.
| |
395 // STATE_DETACHABLE implies that the session is still interactive, but it | |
396 // is not capturing audio anymore. | |
397 switch (event) { | |
398 case EVENT_ABORT: | |
399 return SessionAbort(session, event_args); | |
400 case EVENT_START: | |
401 case EVENT_STOP_CAPTURE: | |
402 return DoNothing(session, event_args); | |
403 case EVENT_DETACH: | |
404 return SessionDetach(session, event_args); | |
405 case EVENT_AUDIO_ENDED: | |
406 return NotFeasible(session, event_args); | |
407 case EVENT_RECOGNITION_ENDED: | |
408 return SessionReportNoMatch(session, event_args); | |
409 case EVENT_RECOGNITION_RESULT: | |
410 // TODO(primiano) Valid only in single shot mode. | |
411 return SessionDetach(session, event_args); | |
412 case EVENT_RECOGNITION_ERROR: | |
413 return SessionReportError(session, event_args); | |
414 } | |
415 break; | |
416 case STATE_DETACHED: | |
Satish
2012/04/19 13:03:19
since this seems to be used only in one shot recog
Primiano Tucci (use gerrit)
2012/04/20 16:06:43
Ditto.
| |
417 switch (event) { | |
418 case EVENT_ABORT: | |
419 return SessionAbort(session, event_args); | |
420 case EVENT_START: | |
421 case EVENT_STOP_CAPTURE: | |
422 case EVENT_DETACH: | |
423 return DoNothing(session, event_args); | |
424 case EVENT_AUDIO_ENDED: | |
425 return NotFeasible(session, event_args); | |
426 case EVENT_RECOGNITION_ENDED: | |
427 return SessionDelete(session, event_args); | |
428 case EVENT_RECOGNITION_RESULT: | |
429 case EVENT_RECOGNITION_ERROR: | |
430 return DoNothing(session, event_args); | |
431 } | |
432 break; | |
433 case STATE_ABORTING: | |
434 // STATE_ABORTING implies that the session is already detached. | |
435 switch (event) { | |
436 case EVENT_ABORT: | |
437 case EVENT_START: | |
438 case EVENT_STOP_CAPTURE: | |
439 case EVENT_DETACH: | |
440 case EVENT_AUDIO_ENDED: | |
441 return DoNothing(session, event_args); | |
442 case EVENT_RECOGNITION_ENDED: | |
443 return SessionDelete(session, event_args); | |
444 case EVENT_RECOGNITION_RESULT: | |
445 case EVENT_RECOGNITION_ERROR: | |
446 return DoNothing(session, event_args); | |
447 } | |
448 break; | |
449 case STATE_ENDED_WITH_ERROR: | |
450 switch (event) { | |
451 case EVENT_ABORT: | |
452 return SessionDelete(session, event_args); | |
453 case EVENT_START: | |
454 return SessionStart(session, event_args); | |
455 case EVENT_STOP_CAPTURE: | |
456 return DoNothing(session, event_args); | |
457 case EVENT_DETACH: | |
458 return SessionDelete(session, event_args); | |
459 case EVENT_AUDIO_ENDED: | |
460 return NotFeasible(session, event_args); | |
461 case EVENT_RECOGNITION_ENDED: | |
462 return DoNothing(session, event_args); | |
463 case EVENT_RECOGNITION_RESULT: | |
464 return NotFeasible(session, event_args); | |
465 case EVENT_RECOGNITION_ERROR: | |
466 return DoNothing(session, event_args); | |
467 } | |
468 break; | |
469 } | |
470 return NotFeasible(session, event_args); | |
471 } | |
472 | |
473 // ----------- Contract for all the FSM evolution functions below ------------- | |
474 // - Are guaranteed to be executed in the IO thread; | |
475 // - Are guaranteed to be not reentrant (themselves and each other); | |
476 // - event_args members are guaranteed to be stable during the call; | |
477 | |
478 SpeechRecognitionManagerImpl::FSMState | |
479 SpeechRecognitionManagerImpl::SessionStart(Session& session, | |
480 const FSMEventArgs& event_args) { | |
481 if (interactive_session_id_ != 0) | |
Satish
2012/04/19 13:03:19
replace 0 with kSessionIDInvalid here and other pl
Primiano Tucci (use gerrit)
2012/04/20 16:06:43
Done.
| |
482 delegate_->DoClose(interactive_session_id_); | |
483 interactive_session_id_ = session.id; | |
484 delegate_->ShowRecognitionRequested(session.id); | |
485 session.error_occurred = false; | |
486 session.recognizer->StartRecognition(); | |
487 return STATE_INTERACTIVE; | |
488 } | |
489 | |
490 SpeechRecognitionManagerImpl::FSMState | |
491 SpeechRecognitionManagerImpl::SessionAbort(Session& session, | |
492 const FSMEventArgs& event_args) { | |
493 DCHECK(session.recognizer.get() != NULL); | |
494 DCHECK(session.recognizer->IsActive()); | |
495 session.recognizer->AbortRecognition(); | |
496 if (interactive_session_id_ == session.id) { | |
497 interactive_session_id_ = 0; | |
498 delegate_->DoClose(session.id); | |
499 } | |
500 return STATE_ABORTING; | |
501 } | |
502 | |
503 SpeechRecognitionManagerImpl::FSMState | |
504 SpeechRecognitionManagerImpl::SessionStopAudioCapture( | |
505 Session& session, const FSMEventArgs& event_args) { | |
506 DCHECK(session.recognizer.get() != NULL); | |
507 DCHECK(session.recognizer->IsActive()); | |
508 session.recognizer->StopAudioCapture(); | |
509 return STATE_DETACHABLE; | |
510 } | |
511 | |
512 SpeechRecognitionManagerImpl::FSMState | |
513 SpeechRecognitionManagerImpl::SessionDetach(Session& session, | |
514 const FSMEventArgs& event_args) { | |
515 DCHECK_EQ(interactive_session_id_, session.id); | |
516 interactive_session_id_ = 0; | |
517 delegate_->DoClose(session.id); | |
518 return STATE_DETACHED; | |
519 } | |
520 | |
521 SpeechRecognitionManagerImpl::FSMState | |
522 SpeechRecognitionManagerImpl::SessionReportError( | |
523 Session& session, const FSMEventArgs& event_args) { | |
524 DCHECK_EQ(interactive_session_id_, session.id); | |
525 DCHECK(event_args.speech_error != NULL); | |
526 delegate_->ShowError(session.id, *event_args.speech_error); | |
527 session.error_occurred = true; | |
528 return STATE_ENDED_WITH_ERROR; | |
529 } | |
530 | |
531 SpeechRecognitionManagerImpl::FSMState | |
532 SpeechRecognitionManagerImpl::SessionReportNoMatch( | |
533 Session& session, const FSMEventArgs& event_args) { | |
534 DCHECK_EQ(interactive_session_id_, session.id); | |
535 DCHECK(!session.error_occurred); | |
536 delegate_->ShowError( | |
537 session.id, | |
538 SpeechRecognitionError(content::SPEECH_RECOGNITION_ERROR_NO_MATCH)); | |
539 return STATE_ENDED_WITH_ERROR; | |
540 } | |
541 | |
542 SpeechRecognitionManagerImpl::FSMState | |
543 SpeechRecognitionManagerImpl::SessionDelete(Session& session, | |
544 const FSMEventArgs& event_args) { | |
545 if (interactive_session_id_ == session.id) { | |
546 interactive_session_id_ = 0; | |
547 delegate_->DoClose(session.id); | |
548 } | |
549 sessions_.erase(session.id); | |
550 // Next state is irrelevant, the session will be deleted afterwards. | |
551 return STATE_ENDED_WITH_ERROR; | |
552 } | |
553 | |
554 SpeechRecognitionManagerImpl::FSMState | |
555 SpeechRecognitionManagerImpl::SessionSetDetachable( | |
556 Session& session, const FSMEventArgs& event_args) { | |
557 return STATE_DETACHABLE; | |
558 } | |
559 | |
560 SpeechRecognitionManagerImpl::FSMState | |
561 SpeechRecognitionManagerImpl::DoNothing(Session& session, | |
562 const FSMEventArgs& event_args) { | |
563 return session.state; | |
564 } | |
565 | |
566 SpeechRecognitionManagerImpl::FSMState | |
567 SpeechRecognitionManagerImpl::NotFeasible(Session& session, | |
568 const FSMEventArgs& event_args) { | |
569 NOTREACHED() << "Unfeasible event " << event_args.event | |
570 << " in state " << session.state | |
571 << " for session " << session.id; | |
572 return session.state; | |
573 } | |
574 | |
575 int SpeechRecognitionManagerImpl::GetNextSessionID() { | |
576 ++last_session_id_; | |
577 // Deal with wrapping of last_session_id_. (How civilized). | |
578 if (last_session_id_ <= 0) | |
579 last_session_id_ = 1; | |
580 return last_session_id_; | |
581 } | |
582 | |
583 bool SpeechRecognitionManagerImpl::SessionExists(int session_id) const { | |
584 return sessions_.find(session_id) != sessions_.end(); | |
585 } | |
586 | |
587 SpeechRecognitionEventListener* SpeechRecognitionManagerImpl::GetListener( | |
588 int session_id) const { | |
589 return sessions_.find(session_id)->second.event_listener; | |
590 } | |
591 | |
84 | 592 |
85 bool SpeechRecognitionManagerImpl::HasAudioInputDevices() { | 593 bool SpeechRecognitionManagerImpl::HasAudioInputDevices() { |
86 return BrowserMainLoop::GetAudioManager()->HasAudioInputDevices(); | 594 return BrowserMainLoop::GetAudioManager()->HasAudioInputDevices(); |
87 } | 595 } |
88 | 596 |
89 bool SpeechRecognitionManagerImpl::IsCapturingAudio() { | 597 bool SpeechRecognitionManagerImpl::IsCapturingAudio() { |
90 return BrowserMainLoop::GetAudioManager()->IsRecordingInProcess(); | 598 return BrowserMainLoop::GetAudioManager()->IsRecordingInProcess(); |
91 } | 599 } |
92 | 600 |
93 string16 SpeechRecognitionManagerImpl::GetAudioInputDeviceModel() { | 601 string16 SpeechRecognitionManagerImpl::GetAudioInputDeviceModel() { |
94 return BrowserMainLoop::GetAudioManager()->GetAudioInputDeviceModel(); | 602 return BrowserMainLoop::GetAudioManager()->GetAudioInputDeviceModel(); |
95 } | 603 } |
96 | 604 |
97 bool SpeechRecognitionManagerImpl::HasPendingRequest(int session_id) const { | |
98 return requests_.find(session_id) != requests_.end(); | |
99 } | |
100 | |
101 InputTagSpeechDispatcherHost* SpeechRecognitionManagerImpl::GetDelegate( | |
102 int session_id) const { | |
103 return requests_.find(session_id)->second.delegate; | |
104 } | |
105 | |
void SpeechRecognitionManagerImpl::ShowAudioInputSettings() {
  // Since AudioManager::ShowAudioInputSettings can potentially launch external
  // processes, do that in the FILE thread to not block the calling threads.
  if (!BrowserThread::CurrentlyOn(BrowserThread::FILE)) {
    // Re-post this same call onto the FILE thread; the posted task re-enters
    // this function on the correct thread and falls through below.
    BrowserThread::PostTask(
        BrowserThread::FILE, FROM_HERE,
        base::Bind(&SpeechRecognitionManagerImpl::ShowAudioInputSettings,
                   base::Unretained(this)));
    return;
  }

  media::AudioManager* audio_manager = BrowserMainLoop::GetAudioManager();
  // The DCHECK documents the caller expectation; the duplicate if-guard keeps
  // release builds from calling into an unavailable settings UI.
  DCHECK(audio_manager->CanShowAudioInputSettings());
  if (audio_manager->CanShowAudioInputSettings())
    audio_manager->ShowAudioInputSettings();
}
122 | 621 |
void SpeechRecognitionManagerImpl::StartRecognition(
    InputTagSpeechDispatcherHost* delegate,
    int session_id,
    int render_process_id,
    int render_view_id,
    const gfx::Rect& element_rect,
    const std::string& language,
    const std::string& grammar,
    const std::string& origin_url,
    net::URLRequestContextGetter* context_getter,
    content::SpeechRecognitionPreferences* recognition_prefs) {
  // Entry point for starting a recognition session. Must be called on the IO
  // thread; hops to the UI thread first because the render-view-type check
  // (CheckRenderViewTypeAndStartRecognition) has to run there. All parameters
  // are bundled into a SpeechRecognitionParams value for the posted task.
  DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO));
  BrowserThread::PostTask(
      BrowserThread::UI, FROM_HERE,
      base::Bind(
          &SpeechRecognitionManagerImpl::CheckRenderViewTypeAndStartRecognition,
          base::Unretained(this),
          SpeechRecognitionParams(
              delegate, session_id, render_process_id, render_view_id,
              element_rect, language, grammar, origin_url, context_getter,
              recognition_prefs)));
}
145 | |
void SpeechRecognitionManagerImpl::CheckRenderViewTypeAndStartRecognition(
    const SpeechRecognitionParams& params) {
  // UI-thread stage of the start sequence: verifies that the originating
  // render view still exists and can host the speech-input UI before
  // proceeding with recognition on the IO thread.
  DCHECK(BrowserThread::CurrentlyOn(BrowserThread::UI));

  RenderViewHostImpl* render_view_host = RenderViewHostImpl::FromID(
      params.render_process_id, params.render_view_id);
  // The view may have gone away while this task was in flight; silently drop
  // the request in that case.
  if (!render_view_host || !render_view_host->GetDelegate())
    return;

  // For host delegates other than TabContents we can't reliably show a popup,
  // including the speech input bubble. In these cases for privacy reasons we
  // don't want to start recording if the user can't be properly notified.
  // An example of this is trying to show the speech input bubble within an
  // extension popup: http://crbug.com/92083. In these situations the speech
  // input extension API should be used instead.
  if (render_view_host->GetDelegate()->GetRenderViewType() ==
      content::VIEW_TYPE_TAB_CONTENTS) {
    // Hop back to the IO thread where the recognizer is actually created.
    BrowserThread::PostTask(
        BrowserThread::IO, FROM_HERE,
        base::Bind(&SpeechRecognitionManagerImpl::ProceedStartingRecognition,
                   base::Unretained(this), params));
  }
}
169 | |
void SpeechRecognitionManagerImpl::ProceedStartingRecognition(
    const SpeechRecognitionParams& params) {
  // Final stage of the start sequence, back on the IO thread: notifies the
  // UI delegate, creates the recognizer for this session and starts it.
  DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO));
  DCHECK(!HasPendingRequest(params.session_id));

  if (delegate_.get()) {
    delegate_->ShowRecognitionRequested(
        params.session_id, params.render_process_id, params.render_view_id,
        params.element_rect);
    // Refreshes |can_report_metrics_| and |request_info_|, both used below.
    delegate_->GetRequestInfo(&can_report_metrics_, &request_info_);
  }

  // operator[] creates the Request entry for this session.
  Request* request = &requests_[params.session_id];
  request->delegate = params.delegate;
  request->recognizer = content::SpeechRecognizer::Create(
      this, params.session_id, params.language, params.grammar,
      params.context_getter, params.recognition_prefs->FilterProfanities(),
      // The origin URL is only reported when metrics reporting is allowed.
      request_info_, can_report_metrics_ ? params.origin_url : "");
  request->is_active = false;

  StartRecognitionForRequest(params.session_id);
}
192 | |
void SpeechRecognitionManagerImpl::StartRecognitionForRequest(int session_id) {
  // Activates the request and starts audio capture. Only one session may be
  // recording at any time; a different ongoing recording is aborted first.
  SpeechRecognizerMap::iterator request = requests_.find(session_id);
  if (request == requests_.end()) {
    NOTREACHED();
    return;
  }

  // We should not currently be recording for the session.
  CHECK(recording_session_id_ != session_id);

  // If we are currently recording audio for another session, abort it cleanly.
  if (recording_session_id_)
    CancelRecognitionAndInformDelegate(recording_session_id_);
  recording_session_id_ = session_id;
  requests_[session_id].is_active = true;
  requests_[session_id].recognizer->StartRecognition();
  if (delegate_.get())
    delegate_->ShowWarmUp(session_id);
}
212 | |
213 void SpeechRecognitionManagerImpl::CancelRecognitionForRequest(int session_id) { | |
214 // Ignore if the session id was not in our active recognizers list because the | |
215 // user might have clicked more than once, or recognition could have been | |
216 // ended due to other reasons before the user click was processed. | |
217 if (!HasPendingRequest(session_id)) | |
218 return; | |
219 | |
220 CancelRecognitionAndInformDelegate(session_id); | |
221 } | |
222 | |
223 void SpeechRecognitionManagerImpl::FocusLostForRequest(int session_id) { | |
224 // See above comment. | |
225 if (!HasPendingRequest(session_id)) | |
226 return; | |
227 | |
228 // If this is an ongoing recording or if we were displaying an error message | |
229 // to the user, abort it since user has switched focus. Otherwise | |
230 // recognition has started and keep that going so user can start speaking to | |
231 // another element while this gets the results in parallel. | |
232 if (recording_session_id_ == session_id || !requests_[session_id].is_active) | |
233 CancelRecognitionAndInformDelegate(session_id); | |
234 } | |
235 | |
void SpeechRecognitionManagerImpl::CancelRecognition(int session_id) {
  // Tears the session down: aborts the recognizer if still active, erases the
  // request entry, resets the recording marker and closes any delegate UI.
  // Callers must guarantee the session exists.
  DCHECK(HasPendingRequest(session_id));
  if (requests_[session_id].is_active)
    requests_[session_id].recognizer->AbortRecognition();
  requests_.erase(session_id);
  if (recording_session_id_ == session_id)
    recording_session_id_ = 0;
  if (delegate_.get())
    delegate_->DoClose(session_id);
}
246 | |
247 void SpeechRecognitionManagerImpl::CancelAllRequestsWithDelegate( | |
248 InputTagSpeechDispatcherHost* delegate) { | |
249 SpeechRecognizerMap::iterator it = requests_.begin(); | |
250 while (it != requests_.end()) { | |
251 if (it->second.delegate == delegate) { | |
252 CancelRecognition(it->first); | |
253 // This map will have very few elements so it is simpler to restart. | |
254 it = requests_.begin(); | |
255 } else { | |
256 ++it; | |
257 } | |
258 } | |
259 } | |
260 | |
261 void SpeechRecognitionManagerImpl::StopRecording(int session_id) { | |
262 // No pending requests on extension popups. | |
263 if (!HasPendingRequest(session_id)) | |
264 return; | |
265 | |
266 requests_[session_id].recognizer->StopAudioCapture(); | |
267 } | |
268 | |
269 // -------- SpeechRecognitionEventListener interface implementation. --------- | |
270 | |
271 void SpeechRecognitionManagerImpl::OnRecognitionResult( | |
272 int session_id, const content::SpeechRecognitionResult& result) { | |
273 DCHECK(HasPendingRequest(session_id)); | |
274 GetDelegate(session_id)->SetRecognitionResult(session_id, result); | |
275 } | |
276 | |
277 void SpeechRecognitionManagerImpl::OnAudioEnd(int session_id) { | |
278 if (recording_session_id_ != session_id) | |
279 return; | |
280 DCHECK_EQ(recording_session_id_, session_id); | |
281 DCHECK(HasPendingRequest(session_id)); | |
282 if (!requests_[session_id].is_active) | |
283 return; | |
284 recording_session_id_ = 0; | |
285 GetDelegate(session_id)->DidCompleteRecording(session_id); | |
286 if (delegate_.get()) | |
287 delegate_->ShowRecognizing(session_id); | |
288 } | |
289 | |
290 void SpeechRecognitionManagerImpl::OnRecognitionEnd(int session_id) { | |
291 if (!HasPendingRequest(session_id) || !requests_[session_id].is_active) | |
292 return; | |
293 GetDelegate(session_id)->DidCompleteRecognition(session_id); | |
294 requests_.erase(session_id); | |
295 if (delegate_.get()) | |
296 delegate_->DoClose(session_id); | |
297 } | |
298 | |
void SpeechRecognitionManagerImpl::OnSoundStart(int session_id) {
  // Intentionally a no-op: this manager takes no action on sound-start.
}
301 | |
void SpeechRecognitionManagerImpl::OnSoundEnd(int session_id) {
  // Intentionally a no-op: this manager takes no action on sound-end.
}
304 | |
void SpeechRecognitionManagerImpl::OnRecognitionError(
    int session_id, const content::SpeechRecognitionError& error) {
  // Deactivates the session and routes the error to the most specific UI
  // notification the delegate supports (missing mic, mic busy, or generic).
  DCHECK(HasPendingRequest(session_id));
  // If this session was the one recording, clear the recording marker.
  if (session_id == recording_session_id_)
    recording_session_id_ = 0;
  requests_[session_id].is_active = false;
  if (delegate_.get()) {
    if (error.code == content::SPEECH_RECOGNITION_ERROR_AUDIO &&
        error.details == content::SPEECH_AUDIO_ERROR_DETAILS_NO_MIC) {
      delegate_->ShowMicError(session_id,
          SpeechRecognitionManagerDelegate::MIC_ERROR_NO_DEVICE_AVAILABLE);
    } else if (error.code == content::SPEECH_RECOGNITION_ERROR_AUDIO &&
               error.details == content::SPEECH_AUDIO_ERROR_DETAILS_IN_USE) {
      delegate_->ShowMicError(session_id,
          SpeechRecognitionManagerDelegate::MIC_ERROR_DEVICE_IN_USE);
    } else {
      // Any other error is surfaced with its raw error code.
      delegate_->ShowRecognizerError(session_id, error.code);
    }
  }
}
325 | |
326 void SpeechRecognitionManagerImpl::OnAudioStart(int session_id) { | |
327 DCHECK(HasPendingRequest(session_id)); | |
328 DCHECK_EQ(recording_session_id_, session_id); | |
329 if (delegate_.get()) | |
330 delegate_->ShowRecording(session_id); | |
331 } | |
332 | |
void SpeechRecognitionManagerImpl::OnRecognitionStart(int session_id) {
  // Intentionally a no-op: no UI or state change on recognition start.
}
335 | |
void SpeechRecognitionManagerImpl::OnEnvironmentEstimationComplete(
    int session_id) {
  // No action needed beyond sanity-checking that the event belongs to the
  // session that is currently recording.
  DCHECK(HasPendingRequest(session_id));
  DCHECK_EQ(recording_session_id_, session_id);
}
341 | |
342 void SpeechRecognitionManagerImpl::OnAudioLevelsChange( | |
343 int session_id, float volume, float noise_volume) { | |
344 DCHECK(HasPendingRequest(session_id)); | |
345 DCHECK_EQ(recording_session_id_, session_id); | |
346 if (delegate_.get()) | |
347 delegate_->ShowInputVolume(session_id, volume, noise_volume); | |
348 } | |
349 | |
void SpeechRecognitionManagerImpl::CancelRecognitionAndInformDelegate(
    int session_id) {
  // Fetch the dispatcher host BEFORE CancelRecognition erases the request
  // entry; looking it up afterwards would dereference an invalid iterator.
  InputTagSpeechDispatcherHost* cur_delegate = GetDelegate(session_id);
  CancelRecognition(session_id);
  // Inform the host that both recording and recognition are over.
  cur_delegate->DidCompleteRecording(session_id);
  cur_delegate->DidCompleteRecognition(session_id);
}
357 | |
SpeechRecognitionManagerImpl::Request::Request()
    // A request starts inactive; it is activated in
    // StartRecognitionForRequest once recognition actually begins.
    : is_active(false) {
}
361 | |
SpeechRecognitionManagerImpl::Request::~Request() {
  // Intentionally empty; defined out-of-line.
}
364 | 640 |
365 } // namespace speech | 641 } // namespace speech |
OLD | NEW |