OLD | NEW |
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "content/renderer/speech_recognition_dispatcher.h" | 5 #include "content/renderer/speech_recognition_dispatcher.h" |
6 | 6 |
7 #include "base/basictypes.h" | 7 #include "base/basictypes.h" |
8 #include "base/strings/utf_string_conversions.h" | 8 #include "base/strings/utf_string_conversions.h" |
9 #include "content/common/speech_recognition_messages.h" | 9 #include "content/common/speech_recognition_messages.h" |
10 #include "content/renderer/render_view_impl.h" | 10 #include "content/renderer/render_view_impl.h" |
(...skipping 17 matching lines...)
28 SpeechRecognitionDispatcher::SpeechRecognitionDispatcher( | 28 SpeechRecognitionDispatcher::SpeechRecognitionDispatcher( |
29 RenderViewImpl* render_view) | 29 RenderViewImpl* render_view) |
30 : RenderViewObserver(render_view), | 30 : RenderViewObserver(render_view), |
31 recognizer_client_(NULL), | 31 recognizer_client_(NULL), |
32 next_id_(1) { | 32 next_id_(1) { |
33 } | 33 } |
34 | 34 |
35 SpeechRecognitionDispatcher::~SpeechRecognitionDispatcher() { | 35 SpeechRecognitionDispatcher::~SpeechRecognitionDispatcher() { |
36 } | 36 } |
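The constructor only seeds the client pointer and the id counter; the handle bookkeeping happens lazily in GetOrCreateIDForHandle() further down. A minimal sketch of the members this file relies on, inferred purely from their uses in this .cc (the real declarations live in speech_recognition_dispatcher.h and may differ):

    // Sketch only, inferred from usage in this file; not the literal header.
    typedef std::map<int, WebSpeechRecognitionHandle> HandleMap;

    WebSpeechRecognizerClient* recognizer_client_;  // Single client per render view.
    HandleMap handle_map_;                          // request_id -> Blink handle.
    int next_id_;                                   // Next request_id to hand out.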
37 | 37 |
| 38 void SpeechRecognitionDispatcher::AbortAllRecognitions() { |
| 39 for (HandleMap::iterator iter = handle_map_.begin(); |
| 40 iter != handle_map_.end(); |
| 41 ++iter) { |
| 42 // The OnEnd event will be sent to the SpeechRecognition object later. |
| 43 abort(iter->second, recognizer_client_); |
| 44 } |
| 45 } |
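The new AbortAllRecognitions() walks every outstanding request for this render view and aborts it through the same path a page-initiated abort would take; the browser answers each abort with an Ended message, which is what eventually fires OnEnd on the Blink side. Note that abort() here is the dispatcher's own lowercase WebSpeechRecognizer method, not ::abort(). A hypothetical call site, purely illustrative (the actual caller is outside this hunk and the names below are assumptions):

    // Hypothetical call site, names assumed and not part of this change:
    // the object that owns the dispatcher drops every in-flight session
    // during teardown or navigation.
    void SomeOwnerOfTheDispatcher::Teardown() {
      speech_recognition_dispatcher_->AbortAllRecognitions();
    }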
| 46 |
38 bool SpeechRecognitionDispatcher::OnMessageReceived( | 47 bool SpeechRecognitionDispatcher::OnMessageReceived( |
39 const IPC::Message& message) { | 48 const IPC::Message& message) { |
40 bool handled = true; | 49 bool handled = true; |
41 IPC_BEGIN_MESSAGE_MAP(SpeechRecognitionDispatcher, message) | 50 IPC_BEGIN_MESSAGE_MAP(SpeechRecognitionDispatcher, message) |
42 IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_Started, OnRecognitionStarted) | 51 IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_Started, OnRecognitionStarted) |
43 IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_AudioStarted, OnAudioStarted) | 52 IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_AudioStarted, OnAudioStarted) |
44 IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_SoundStarted, OnSoundStarted) | 53 IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_SoundStarted, OnSoundStarted) |
45 IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_SoundEnded, OnSoundEnded) | 54 IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_SoundEnded, OnSoundEnded) |
46 IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_AudioEnded, OnAudioEnded) | 55 IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_AudioEnded, OnAudioEnded) |
47 IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_ErrorOccurred, OnErrorOccurred) | 56 IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_ErrorOccurred, OnErrorOccurred) |
48 IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_Ended, OnRecognitionEnded) | 57 IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_Ended, OnRecognitionEnded) |
49 IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_ResultRetrieved, | 58 IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_ResultRetrieved, |
50 OnResultsRetrieved) | 59 OnResultsRetrieved) |
51 IPC_MESSAGE_UNHANDLED(handled = false) | 60 IPC_MESSAGE_UNHANDLED(handled = false) |
52 IPC_END_MESSAGE_MAP() | 61 IPC_END_MESSAGE_MAP() |
53 return handled; | 62 return handled; |
54 } | 63 } |
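For readers less familiar with the IPC plumbing: the IPC_* macros route each incoming message to the matching On*() handler and report whether the message was consumed. Conceptually (this is not the literal macro expansion) the handler behaves like the following sketch:

    // Conceptual sketch of what the message map does; the real macros also
    // deserialize the message payload into the handler's arguments.
    bool SpeechRecognitionDispatcher::OnMessageReceived(
        const IPC::Message& message) {
      switch (message.type()) {
        case SpeechRecognitionMsg_Started::ID:
          // Payload decoded, then OnRecognitionStarted(...) is called.
          return true;
        // ...one case per IPC_MESSAGE_HANDLER above...
        default:
          return false;  // IPC_MESSAGE_UNHANDLED
      }
    }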
55 | 64 |
56 void SpeechRecognitionDispatcher::start( | 65 void SpeechRecognitionDispatcher::start( |
57 const WebSpeechRecognitionHandle& handle, | 66 const WebSpeechRecognitionHandle& handle, |
58 const WebSpeechRecognitionParams& params, | 67 const WebSpeechRecognitionParams& params, |
59 WebSpeechRecognizerClient* recognizer_client) { | 68 WebSpeechRecognizerClient* recognizer_client) { |
60 DCHECK(!recognizer_client_ || recognizer_client_ == recognizer_client); | 69 DCHECK(!recognizer_client_ || recognizer_client_ == recognizer_client); |
| 70 |
61 recognizer_client_ = recognizer_client; | 71 recognizer_client_ = recognizer_client; |
62 | 72 |
63 SpeechRecognitionHostMsg_StartRequest_Params msg_params; | 73 SpeechRecognitionHostMsg_StartRequest_Params msg_params; |
64 for (size_t i = 0; i < params.grammars().size(); ++i) { | 74 for (size_t i = 0; i < params.grammars().size(); ++i) { |
65 const WebSpeechGrammar& grammar = params.grammars()[i]; | 75 const WebSpeechGrammar& grammar = params.grammars()[i]; |
66 msg_params.grammars.push_back( | 76 msg_params.grammars.push_back( |
67 SpeechRecognitionGrammar(grammar.src().spec(), grammar.weight())); | 77 SpeechRecognitionGrammar(grammar.src().spec(), grammar.weight())); |
68 } | 78 } |
69 msg_params.language = base::UTF16ToUTF8(params.language()); | 79 msg_params.language = base::UTF16ToUTF8(params.language()); |
70 msg_params.max_hypotheses = static_cast<uint32>(params.maxAlternatives()); | 80 msg_params.max_hypotheses = static_cast<uint32>(params.maxAlternatives()); |
(...skipping 128 matching lines...)
199 transcripts[i] = result.hypotheses[i].utterance; | 209 transcripts[i] = result.hypotheses[i].utterance; |
200 confidences[i] = static_cast<float>(result.hypotheses[i].confidence); | 210 confidences[i] = static_cast<float>(result.hypotheses[i].confidence); |
201 } | 211 } |
202 webkit_result->assign(transcripts, confidences, !result.is_provisional); | 212 webkit_result->assign(transcripts, confidences, !result.is_provisional); |
203 } | 213 } |
204 | 214 |
205 recognizer_client_->didReceiveResults( | 215 recognizer_client_->didReceiveResults( |
206 GetHandleFromID(request_id), final, provisional); | 216 GetHandleFromID(request_id), final, provisional); |
207 } | 217 } |
208 | 218 |
209 | |
210 int SpeechRecognitionDispatcher::GetOrCreateIDForHandle( | 219 int SpeechRecognitionDispatcher::GetOrCreateIDForHandle( |
211 const WebSpeechRecognitionHandle& handle) { | 220 const WebSpeechRecognitionHandle& handle) { |
212 // Search first for an existing mapping. | 221 // Search first for an existing mapping. |
213 for (HandleMap::iterator iter = handle_map_.begin(); | 222 for (HandleMap::iterator iter = handle_map_.begin(); |
214 iter != handle_map_.end(); | 223 iter != handle_map_.end(); |
215 ++iter) { | 224 ++iter) { |
216 if (iter->second.equals(handle)) | 225 if (iter->second.equals(handle)) |
217 return iter->first; | 226 return iter->first; |
218 } | 227 } |
219 // If no existing mapping found, create a new one. | 228 // If no existing mapping found, create a new one. |
(...skipping 15 matching lines...)
235 } | 244 } |
236 | 245 |
237 const WebSpeechRecognitionHandle& SpeechRecognitionDispatcher::GetHandleFromID( | 246 const WebSpeechRecognitionHandle& SpeechRecognitionDispatcher::GetHandleFromID( |
238 int request_id) { | 247 int request_id) { |
239 HandleMap::iterator iter = handle_map_.find(request_id); | 248 HandleMap::iterator iter = handle_map_.find(request_id); |
240 DCHECK(iter != handle_map_.end()); | 249 DCHECK(iter != handle_map_.end()); |
241 return iter->second; | 250 return iter->second; |
242 } | 251 } |
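GetOrCreateIDForHandle() and GetHandleFromID() together form the bidirectional mapping between Blink's opaque WebSpeechRecognitionHandle and the integer request ids that travel over IPC. The forward lookup is a linear scan because the handle only exposes equals(), so it cannot serve as a map key; that is presumably acceptable given how few concurrent recognitions a single view can have. An illustration of the round trip (not code from this file):

    // Illustration only: the integer id crosses the IPC boundary, while the
    // WebSpeechRecognitionHandle stays renderer-side.
    int request_id = GetOrCreateIDForHandle(handle);   // on start()/stop()/abort()
    // ...browser replies carry the same request_id...
    const WebSpeechRecognitionHandle& h = GetHandleFromID(request_id);
    DCHECK(h.equals(handle));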
243 | 252 |
244 } // namespace content | 253 } // namespace content |