Index: content/renderer/speech_recognition_dispatcher.cc |
diff --git a/content/renderer/speech_recognition_dispatcher.cc b/content/renderer/speech_recognition_dispatcher.cc |
new file mode 100644 |
index 0000000000000000000000000000000000000000..609fcfd0a17f5c7f3237cc5ba7c7cc4fc65e8f78 |
--- /dev/null |
+++ b/content/renderer/speech_recognition_dispatcher.cc |
@@ -0,0 +1,180 @@ |
+// Copyright (c) 2012 The Chromium Authors. All rights reserved. |
+// Use of this source code is governed by a BSD-style license that can be |
+// found in the LICENSE file. |
+ |
+#include "content/renderer/speech_recognition_dispatcher.h" |
+ |
+#include "base/basictypes.h" |
+#include "base/utf_string_conversions.h" |
+#include "content/common/speech_recognition_messages.h" |
+#include "content/renderer/render_view_impl.h" |
+#include "third_party/WebKit/Source/WebKit/chromium/public/platform/WebString.h" |
+#include "third_party/WebKit/Source/WebKit/chromium/public/platform/WebVector.h" |
+#include "third_party/WebKit/Source/WebKit/chromium/public/WebSpeechGrammar.h" |
+#include "third_party/WebKit/Source/WebKit/chromium/public/WebSpeechRecognitionParams.h" |
+#include "third_party/WebKit/Source/WebKit/chromium/public/WebSpeechRecognitionResult.h" |
+#include "third_party/WebKit/Source/WebKit/chromium/public/WebSpeechRecognizer.h" |
+#include "third_party/WebKit/Source/WebKit/chromium/public/WebSpeechRecognizerClient.h" |
+ |
+using content::SpeechRecognitionError; |
+using content::SpeechRecognitionResult; |
+using WebKit::WebVector; |
+using WebKit::WebString; |
+using WebKit::WebSpeechGrammar; |
+using WebKit::WebSpeechRecognitionHandle; |
+using WebKit::WebSpeechRecognitionResult; |
+using WebKit::WebSpeechRecognitionParams; |
+using WebKit::WebSpeechRecognizerClient; |
+ |
// Constructs the dispatcher for |render_view|. No recognizer client is bound
// yet (recognizer_client_ is set on the first start() call), and handle IDs
// will start from 1 because last_mapping_id_ is pre-incremented before use in
// GetIDForHandle().
SpeechRecognitionDispatcher::SpeechRecognitionDispatcher(
    RenderViewImpl* render_view)
    : content::RenderViewObserver(render_view),
      recognizer_client_(NULL),
      last_mapping_id_(0) {
}
+ |
// Nothing to release explicitly; members clean themselves up and the base
// class handles observer bookkeeping.
SpeechRecognitionDispatcher::~SpeechRecognitionDispatcher() {
}
+ |
+bool SpeechRecognitionDispatcher::OnMessageReceived( |
+ const IPC::Message& message) { |
+ bool handled = true; |
+ IPC_BEGIN_MESSAGE_MAP(SpeechRecognitionDispatcher, message) |
+ IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_Started, OnRecognitionStarted) |
+ IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_AudioStarted, OnAudioStarted) |
+ IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_SoundStarted, OnSoundStarted) |
+ IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_SoundEnded, OnSoundEnded) |
+ IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_AudioEnded, OnAudioEnded) |
+ IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_ErrorOccurred,OnErrorOccurred) |
+ IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_Ended, OnRecognitionEnded) |
+ IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_ResultRetrieved, |
+ OnResultRetrieved) |
+ IPC_MESSAGE_UNHANDLED(handled = false) |
+ IPC_END_MESSAGE_MAP() |
+ return handled; |
+} |
+ |
+void SpeechRecognitionDispatcher::start( |
+ const WebSpeechRecognitionHandle& handle, |
+ const WebSpeechRecognitionParams& params, |
+ WebSpeechRecognizerClient* recognizer_client) { |
+ //TODO(primiano) What to do if a start is issued to an already started object? |
+ DCHECK(!recognizer_client_ || recognizer_client_ == recognizer_client); |
+ recognizer_client_ = recognizer_client; |
+ |
+ SpeechRecognitionHostMsg_StartRequest_Params msg_params; |
+ for (size_t i = 0; i < params.grammars().size(); ++i) { |
+ const WebSpeechGrammar& grammar = params.grammars()[i]; |
+ msg_params.grammars.push_back( |
+ content::SpeechRecognitionGrammar(grammar.src().spec(), |
+ grammar.weight())); |
+ } |
+ msg_params.language = UTF16ToUTF8(params.language()); |
+ msg_params.is_one_shot = !params.continuous(); |
+ msg_params.origin_url = ""; // TODO(primiano) we need an origin from WebKit. |
+ msg_params.render_view_id = routing_id(); |
+ msg_params.js_handle_id = GetIDForHandle(handle); |
+ Send(new SpeechRecognitionHostMsg_StartRequest(msg_params)); |
+} |
+ |
+void SpeechRecognitionDispatcher::stop( |
+ const WebSpeechRecognitionHandle& handle, |
+ WebSpeechRecognizerClient* recognizer_client) { |
+ DCHECK(recognizer_client_ == recognizer_client); |
+ Send(new SpeechRecognitionHostMsg_StopCaptureRequest(routing_id(), |
+ GetIDForHandle(handle))); |
+} |
+ |
+void SpeechRecognitionDispatcher::abort( |
+ const WebSpeechRecognitionHandle& handle, |
+ WebSpeechRecognizerClient* recognizer_client) { |
+ Send(new SpeechRecognitionHostMsg_AbortRequest(routing_id(), |
+ GetIDForHandle(handle))); |
+} |
+ |
// Browser signalled that the recognition session started: forward to WebKit.
void SpeechRecognitionDispatcher::OnRecognitionStarted(int js_handle_id) {
  DCHECK(recognizer_client_);
  recognizer_client_->didStart(GetHandleFromID(js_handle_id));
}
+ |
// Browser signalled that audio capture began: forward to WebKit.
void SpeechRecognitionDispatcher::OnAudioStarted(int js_handle_id) {
  DCHECK(recognizer_client_);
  recognizer_client_->didStartAudio(GetHandleFromID(js_handle_id));
}
+ |
// Browser signalled that sound was detected in the audio stream: forward to
// WebKit.
void SpeechRecognitionDispatcher::OnSoundStarted(int js_handle_id) {
  DCHECK(recognizer_client_);
  recognizer_client_->didStartSound(GetHandleFromID(js_handle_id));
}
+ |
// Browser signalled the end of detected sound: forward to WebKit.
void SpeechRecognitionDispatcher::OnSoundEnded(int js_handle_id) {
  DCHECK(recognizer_client_);
  recognizer_client_->didEndSound(GetHandleFromID(js_handle_id));
}
+ |
// Browser signalled that audio capture finished: forward to WebKit.
void SpeechRecognitionDispatcher::OnAudioEnded(int js_handle_id) {
  DCHECK(recognizer_client_);
  recognizer_client_->didEndAudio(GetHandleFromID(js_handle_id));
}
+ |
+void SpeechRecognitionDispatcher::OnErrorOccurred( |
+ int js_handle_id, const SpeechRecognitionError& error) { |
+ DCHECK(recognizer_client_); |
+ if (error.code == content::SPEECH_RECOGNITION_ERROR_NO_MATCH) { |
+ recognizer_client_->didReceiveNoMatch(GetHandleFromID(js_handle_id), |
+ WebSpeechRecognitionResult()); |
+ } else { |
+ recognizer_client_->didReceiveError(GetHandleFromID(js_handle_id), |
+ WebString(), // TODO(primiano) message? |
+ error.code); |
+ } |
+} |
+ |
// Browser signalled the end of the session: notify WebKit first (didEnd needs
// the handle, which GetHandleFromID returns by reference into handle_map_),
// then drop the ID<->handle mapping. Assumes Ended is the last IPC sent for
// this js_handle_id — TODO confirm against the browser-side state machine.
void SpeechRecognitionDispatcher::OnRecognitionEnded(int js_handle_id) {
  DCHECK(recognizer_client_);
  recognizer_client_->didEnd(GetHandleFromID(js_handle_id));
  handle_map_.erase(js_handle_id);
}
+ |
+void SpeechRecognitionDispatcher::OnResultRetrieved( |
+ int js_handle_id, const SpeechRecognitionResult& result) { |
+ DCHECK(recognizer_client_); |
+ |
+ const size_t num_hypotheses = result.hypotheses.size(); |
+ WebSpeechRecognitionResult webkit_result; |
+ WebVector<WebString> transcripts(num_hypotheses); |
+ WebVector<float> confidences(num_hypotheses); |
+ for (size_t i = 0; i < num_hypotheses; ++i) { |
+ transcripts[i] = result.hypotheses[i].utterance; |
+ confidences[i] = static_cast<float>(result.hypotheses[i].confidence); |
+ } |
+ webkit_result.assign(transcripts, confidences, !result.provisional); |
+ // TODO(primiano) Handle history, currently empty. |
+ WebVector<WebSpeechRecognitionResult> empty_history; |
+ recognizer_client_->didReceiveResult(GetHandleFromID(js_handle_id), |
+ webkit_result, |
+ 0, // result_index |
+ empty_history); |
+} |
+ |
+int SpeechRecognitionDispatcher::GetIDForHandle( |
+ const WebSpeechRecognitionHandle& handle) { |
+ // Search first for an existing mapping. |
+ for (HandleMap::iterator iter = handle_map_.begin(); |
+ iter != handle_map_.end(); |
+ ++iter) { |
+ if (iter->second.equals(handle)) |
+ return iter->first; |
+ } |
+ // If no existing mapping found, create a new one. |
+ ++last_mapping_id_; |
+ handle_map_[last_mapping_id_] = handle; |
+ return last_mapping_id_; |
+} |
+ |
// Maps a browser-side |js_handle_id| back to the WebKit handle registered by
// GetIDForHandle and not yet erased by OnRecognitionEnded. An unknown ID
// trips the DCHECK; in release builds it would dereference end() (undefined
// behavior), so callers must only pass IDs for live sessions.
const WebSpeechRecognitionHandle& SpeechRecognitionDispatcher::GetHandleFromID(
    int js_handle_id) {
  HandleMap::iterator iter = handle_map_.find(js_handle_id);
  DCHECK(iter != handle_map_.end());
  return iter->second;
}