Chromium Code Reviews| Index: content/renderer/speech_recognition_dispatcher.cc |
| diff --git a/content/renderer/speech_recognition_dispatcher.cc b/content/renderer/speech_recognition_dispatcher.cc |
| new file mode 100644 |
| index 0000000000000000000000000000000000000000..dbcac7f3af80541bbee486682121dee19f311992 |
| --- /dev/null |
| +++ b/content/renderer/speech_recognition_dispatcher.cc |
| @@ -0,0 +1,203 @@ |
| +// Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| +// Use of this source code is governed by a BSD-style license that can be |
| +// found in the LICENSE file. |
| + |
| +#include "content/renderer/speech_recognition_dispatcher.h" |
| + |
| +#include "base/basictypes.h" |
| +#include "base/utf_string_conversions.h" |
| +#include "content/common/speech_recognition_messages.h" |
| +#include "content/renderer/render_view_impl.h" |
| +#include "third_party/WebKit/Source/WebKit/chromium/public/platform/WebString.h" |
| +#include "third_party/WebKit/Source/WebKit/chromium/public/platform/WebVector.h" |
| +#include "third_party/WebKit/Source/WebKit/chromium/public/WebSpeechGrammar.h" |
| +#include "third_party/WebKit/Source/WebKit/chromium/public/WebSpeechRecognitionParams.h" |
| +#include "third_party/WebKit/Source/WebKit/chromium/public/WebSpeechRecognitionResult.h" |
| +#include "third_party/WebKit/Source/WebKit/chromium/public/WebSpeechRecognizer.h" |
|
jam
2012/05/23 15:41:24
nit: this is already included in your header, so take it out of the .cc file
Primiano Tucci (use gerrit)
2012/05/23 17:11:56
Done.
|
| +#include "third_party/WebKit/Source/WebKit/chromium/public/WebSpeechRecognizerClient.h" |
| + |
| +using content::SpeechRecognitionError; |
| +using content::SpeechRecognitionResult; |
| +using WebKit::WebVector; |
| +using WebKit::WebString; |
| +using WebKit::WebSpeechGrammar; |
| +using WebKit::WebSpeechRecognitionHandle; |
| +using WebKit::WebSpeechRecognitionResult; |
| +using WebKit::WebSpeechRecognitionParams; |
| +using WebKit::WebSpeechRecognizerClient; |
| + |
| +SpeechRecognitionDispatcher::SpeechRecognitionDispatcher( |
| + RenderViewImpl* render_view) |
| + : content::RenderViewObserver(render_view), |
| + recognizer_client_(NULL), |
| + next_id_(1) { |
| +} |
| + |
| +SpeechRecognitionDispatcher::~SpeechRecognitionDispatcher() { |
| +} |
| + |
// Routes speech recognition IPC messages from the browser process to the
// matching On* handler below. Returns true if the message was consumed
// here, false so other observers may handle it.
bool SpeechRecognitionDispatcher::OnMessageReceived(
    const IPC::Message& message) {
  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(SpeechRecognitionDispatcher, message)
    IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_Started, OnRecognitionStarted)
    IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_AudioStarted, OnAudioStarted)
    IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_SoundStarted, OnSoundStarted)
    IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_SoundEnded, OnSoundEnded)
    IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_AudioEnded, OnAudioEnded)
    IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_ErrorOccurred, OnErrorOccurred)
    IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_Ended, OnRecognitionEnded)
    IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_ResultRetrieved, OnResultRetrieved)
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()
  return handled;
}
| + |
| +void SpeechRecognitionDispatcher::start( |
| + const WebSpeechRecognitionHandle& handle, |
| + const WebSpeechRecognitionParams& params, |
| + WebSpeechRecognizerClient* recognizer_client) { |
| + //TODO(primiano) What to do if a start is issued to an already started object? |
| + DCHECK(!recognizer_client_ || recognizer_client_ == recognizer_client); |
| + recognizer_client_ = recognizer_client; |
| + |
| + SpeechRecognitionHostMsg_StartRequest_Params msg_params; |
| + for (size_t i = 0; i < params.grammars().size(); ++i) { |
| + const WebSpeechGrammar& grammar = params.grammars()[i]; |
| + msg_params.grammars.push_back( |
| + content::SpeechRecognitionGrammar(grammar.src().spec(), |
| + grammar.weight())); |
| + } |
| + msg_params.language = UTF16ToUTF8(params.language()); |
| + msg_params.is_one_shot = !params.continuous(); |
| + msg_params.origin_url = ""; // TODO(primiano) we need an origin from WebKit. |
| + msg_params.render_view_id = routing_id(); |
| + msg_params.request_id = GetIDForHandle(handle); |
| + Send(new SpeechRecognitionHostMsg_StartRequest(msg_params)); |
| +} |
| + |
| +void SpeechRecognitionDispatcher::stop( |
| + const WebSpeechRecognitionHandle& handle, |
| + WebSpeechRecognizerClient* recognizer_client) { |
| + DCHECK(recognizer_client_ == recognizer_client); |
| + Send(new SpeechRecognitionHostMsg_StopCaptureRequest(routing_id(), |
| + GetIDForHandle(handle))); |
| +} |
| + |
| +void SpeechRecognitionDispatcher::abort( |
| + const WebSpeechRecognitionHandle& handle, |
| + WebSpeechRecognizerClient* recognizer_client) { |
| + Send(new SpeechRecognitionHostMsg_AbortRequest(routing_id(), |
| + GetIDForHandle(handle))); |
| +} |
| + |
// Browser -> WebKit: the recognition session for |request_id| has started.
// NOTE(review): the reviewer suggested these handler DCHECKs are
// unnecessary (a null client would crash immediately anyway) — presumably
// removed in a later patch set; confirm against the landed revision.
void SpeechRecognitionDispatcher::OnRecognitionStarted(int request_id) {
  DCHECK(recognizer_client_);
  recognizer_client_->didStart(GetHandleFromID(request_id));
}
| + |
// Browser -> WebKit: audio capture has begun for |request_id|.
void SpeechRecognitionDispatcher::OnAudioStarted(int request_id) {
  DCHECK(recognizer_client_);
  recognizer_client_->didStartAudio(GetHandleFromID(request_id));
}
| + |
// Browser -> WebKit: sound (likely speech) detected for |request_id|.
void SpeechRecognitionDispatcher::OnSoundStarted(int request_id) {
  DCHECK(recognizer_client_);
  recognizer_client_->didStartSound(GetHandleFromID(request_id));
}
| + |
// Browser -> WebKit: sound is no longer detected for |request_id|.
void SpeechRecognitionDispatcher::OnSoundEnded(int request_id) {
  DCHECK(recognizer_client_);
  recognizer_client_->didEndSound(GetHandleFromID(request_id));
}
| + |
// Browser -> WebKit: audio capture has finished for |request_id|.
void SpeechRecognitionDispatcher::OnAudioEnded(int request_id) {
  DCHECK(recognizer_client_);
  recognizer_client_->didEndAudio(GetHandleFromID(request_id));
}
| + |
| +void SpeechRecognitionDispatcher::OnErrorOccurred( |
| + int request_id, const SpeechRecognitionError& error) { |
| + DCHECK(recognizer_client_); |
| + if (error.code == content::SPEECH_RECOGNITION_ERROR_NO_MATCH) { |
| + recognizer_client_->didReceiveNoMatch(GetHandleFromID(request_id), |
| + WebSpeechRecognitionResult()); |
| + } else { |
| + // TODO(primiano) speech_recognition_error.h must be updated with the new |
| + // API specs soon. |
| + WebSpeechRecognizerClient::ErrorCode wk_error_code; |
| + switch (error.code) { |
| + case content::SPEECH_RECOGNITION_ERROR_ABORTED: |
| + wk_error_code = WebSpeechRecognizerClient::AbortedError; |
| + break; |
| + case content::SPEECH_RECOGNITION_ERROR_AUDIO: |
| + wk_error_code = WebSpeechRecognizerClient::AudioCaptureError; |
| + break; |
| + case content::SPEECH_RECOGNITION_ERROR_NETWORK: |
| + wk_error_code = WebSpeechRecognizerClient::NetworkError; |
| + break; |
| + case content::SPEECH_RECOGNITION_ERROR_NO_SPEECH: |
| + wk_error_code = WebSpeechRecognizerClient::NoSpeechError; |
| + break; |
| + case content::SPEECH_RECOGNITION_ERROR_BAD_GRAMMAR: |
| + wk_error_code = WebSpeechRecognizerClient::BadGrammarError; |
| + break; |
| + default: |
| + NOTREACHED(); |
| + wk_error_code = WebSpeechRecognizerClient::OtherError; |
| + } |
| + recognizer_client_->didReceiveError(GetHandleFromID(request_id), |
| + WebString(), // TODO(primiano) message? |
| + wk_error_code); |
| + } |
| +} |
| + |
// Browser -> WebKit: the session for |request_id| is over. The handle
// mapping is erased only after didEnd(), since GetHandleFromID() must
// still find the handle to pass it to the client.
void SpeechRecognitionDispatcher::OnRecognitionEnded(int request_id) {
  DCHECK(recognizer_client_);
  recognizer_client_->didEnd(GetHandleFromID(request_id));
  handle_map_.erase(request_id);
}
| + |
| +void SpeechRecognitionDispatcher::OnResultRetrieved( |
| + int request_id, const SpeechRecognitionResult& result) { |
| + DCHECK(recognizer_client_); |
| + |
| + const size_t num_hypotheses = result.hypotheses.size(); |
| + WebSpeechRecognitionResult webkit_result; |
| + WebVector<WebString> transcripts(num_hypotheses); |
| + WebVector<float> confidences(num_hypotheses); |
| + for (size_t i = 0; i < num_hypotheses; ++i) { |
| + transcripts[i] = result.hypotheses[i].utterance; |
| + confidences[i] = static_cast<float>(result.hypotheses[i].confidence); |
| + } |
| + webkit_result.assign(transcripts, confidences, !result.provisional); |
| + // TODO(primiano) Handle history, currently empty. |
| + WebVector<WebSpeechRecognitionResult> empty_history; |
| + recognizer_client_->didReceiveResult(GetHandleFromID(request_id), |
| + webkit_result, |
| + 0, // result_index |
| + empty_history); |
| +} |
| + |
| +int SpeechRecognitionDispatcher::GetIDForHandle( |
| + const WebSpeechRecognitionHandle& handle) { |
| + // Search first for an existing mapping. |
| + for (HandleMap::iterator iter = handle_map_.begin(); |
| + iter != handle_map_.end(); |
| + ++iter) { |
| + if (iter->second.equals(handle)) |
| + return iter->first; |
| + } |
| + // If no existing mapping found, create a new one. |
| + const int new_id = next_id_; |
| + handle_map_[new_id] = handle; |
| + ++next_id_; |
| + return new_id; |
| +} |
| + |
// Returns the WebKit handle previously bound to |request_id| by
// GetIDForHandle(). The id must exist: in release builds a missing entry
// would return a reference into a default-inserted... no — find() is used,
// so a missing id yields end() and dereferencing it is undefined behavior;
// the DCHECK documents that callers only pass live ids.
const WebSpeechRecognitionHandle& SpeechRecognitionDispatcher::GetHandleFromID(
    int request_id) {
  HandleMap::iterator iter = handle_map_.find(request_id);
  DCHECK(iter != handle_map_.end());
  return iter->second;
}