| OLD | NEW |
| (Empty) |
| 1 // Copyright 2014 The Chromium Authors. All rights reserved. | |
| 2 // Use of this source code is governed by a BSD-style license that can be | |
| 3 // found in the LICENSE file. | |
| 4 | |
| 5 #include "content/shell/renderer/test_runner/mock_web_speech_recognizer.h" | |
| 6 | |
| 7 #include "base/logging.h" | |
| 8 #include "content/shell/renderer/test_runner/web_test_delegate.h" | |
| 9 #include "third_party/WebKit/public/web/WebSpeechRecognitionResult.h" | |
| 10 #include "third_party/WebKit/public/web/WebSpeechRecognizerClient.h" | |
| 11 | |
| 12 namespace content { | |
| 13 | |
| 14 namespace { | |
| 15 | |
| 16 // Task class for calling a client function that does not take any parameters. | |
| 17 typedef void (blink::WebSpeechRecognizerClient::*ClientFunctionPointer)( | |
| 18 const blink::WebSpeechRecognitionHandle&); | |
| // Queued task that forwards one no-argument client callback (didStart, | |
| // didEnd, etc.) to the recognizer's current client, passing the | |
| // recognizer's current handle. | |
| 19 class ClientCallTask : public MockWebSpeechRecognizer::Task { | |
| 20 public: | |
| 21 ClientCallTask(MockWebSpeechRecognizer* mock, ClientFunctionPointer function) | |
| 22 : MockWebSpeechRecognizer::Task(mock), function_(function) {} | |
| 23 | |
| 24 ~ClientCallTask() override {} | |
| 25 | |
| 26 void run() override { | |
| // Invoke the stored member-function pointer on the client. | |
| 27 (recognizer_->Client()->*function_)(recognizer_->Handle()); | |
| 28 } | |
| 29 | |
| 30 private: | |
| 31 ClientFunctionPointer function_; | |
| 32 | |
| 33 DISALLOW_COPY_AND_ASSIGN(ClientCallTask); | |
| 34 }; | |
| 35 | |
| 36 // Task for delivering a result event. | |
| 37 class ResultTask : public MockWebSpeechRecognizer::Task { | |
| 38 public: | |
| // Take the transcript by const reference to avoid copying the WebString | |
| // at the call site (the member below still stores its own copy); this | |
| // also matches ErrorTask, which takes its WebString by const reference. | |
| 39 ResultTask(MockWebSpeechRecognizer* mock, | |
| 40 const blink::WebString& transcript, | |
| 41 float confidence) | |
| 42 : MockWebSpeechRecognizer::Task(mock), | |
| 43 transcript_(transcript), | |
| 44 confidence_(confidence) {} | |
| 45 | |
| 46 ~ResultTask() override {} | |
| 47 | |
| // Delivers a single final result (one transcript/confidence pair) and an | |
| // empty interim-result list to the client. | |
| 48 void run() override { | |
| 49 blink::WebVector<blink::WebString> transcripts(static_cast<size_t>(1)); | |
| 50 blink::WebVector<float> confidences(static_cast<size_t>(1)); | |
| 51 transcripts[0] = transcript_; | |
| 52 confidences[0] = confidence_; | |
| 53 blink::WebVector<blink::WebSpeechRecognitionResult> final_results( | |
| 54 static_cast<size_t>(1)); | |
| 55 blink::WebVector<blink::WebSpeechRecognitionResult> interim_results; | |
| 56 final_results[0].assign(transcripts, confidences, true); | |
| 57 | |
| 58 recognizer_->Client()->didReceiveResults( | |
| 59 recognizer_->Handle(), final_results, interim_results); | |
| 60 } | |
| 61 | |
| 62 private: | |
| 63 blink::WebString transcript_; | |
| 64 float confidence_; | |
| 65 | |
| 66 DISALLOW_COPY_AND_ASSIGN(ResultTask); | |
| 67 }; | |
| 68 | |
| 69 // Task for delivering a nomatch event. | |
| 70 class NoMatchTask : public MockWebSpeechRecognizer::Task { | |
| 71 public: | |
| // explicit: a single-argument constructor must not act as an implicit | |
| // conversion from MockWebSpeechRecognizer* (style-guide requirement). | |
| 72 explicit NoMatchTask(MockWebSpeechRecognizer* mock) | |
| 73 : MockWebSpeechRecognizer::Task(mock) {} | |
| 74 | |
| 75 ~NoMatchTask() override {} | |
| 76 | |
| // Sends didReceiveNoMatch with an empty (default) result object. | |
| 77 void run() override { | |
| 78 recognizer_->Client()->didReceiveNoMatch( | |
| 79 recognizer_->Handle(), blink::WebSpeechRecognitionResult()); | |
| 80 } | |
| 81 | |
| 82 private: | |
| 83 DISALLOW_COPY_AND_ASSIGN(NoMatchTask); | |
| 84 }; | |
| 85 | |
| 86 // Task for delivering an error event. | |
| 87 class ErrorTask : public MockWebSpeechRecognizer::Task { | |
| 88 public: | |
| 89 ErrorTask(MockWebSpeechRecognizer* mock, | |
| 90 blink::WebSpeechRecognizerClient::ErrorCode code, | |
| 91 const blink::WebString& message) | |
| 92 : MockWebSpeechRecognizer::Task(mock), code_(code), message_(message) {} | |
| 93 | |
| 94 ~ErrorTask() override {} | |
| 95 | |
| 96 void run() override { | |
| // Forward the stored error code and message to the client. | |
| 97 recognizer_->Client()->didReceiveError( | |
| 98 recognizer_->Handle(), message_, code_); | |
| 99 } | |
| 100 | |
| 101 private: | |
| 102 blink::WebSpeechRecognizerClient::ErrorCode code_; | |
| 103 blink::WebString message_; | |
| 104 | |
| 105 DISALLOW_COPY_AND_ASSIGN(ErrorTask); | |
| 106 }; | |
| 107 | |
| 108 } // namespace | |
| 109 | |
| // The delegate starts out null (nullptr, not literal 0 — the file already | |
| // uses C++11) and must be set via SetDelegate() before any task is queued, | |
| // since StartTaskQueue() dereferences it. | |
| 110 MockWebSpeechRecognizer::MockWebSpeechRecognizer() | |
| 111 : was_aborted_(false), task_queue_running_(false), delegate_(nullptr) { | |
| 112 } | |
| 113 | |
| // Deletes any tasks still pending so they are not leaked on teardown. | |
| 114 MockWebSpeechRecognizer::~MockWebSpeechRecognizer() { | |
| 115 ClearTaskQueue(); | |
| 116 } | |
| 117 | |
| // Stores the delegate used to post StepTasks. The pointer is not deleted | |
| // by this class (the destructor only clears the task queue). | |
| 118 void MockWebSpeechRecognizer::SetDelegate(WebTestDelegate* delegate) { | |
| 119 delegate_ = delegate; | |
| 120 } | |
| 121 | |
| // Starts a mock recognition session: records the handle and client, queues | |
| // the standard start events (didStart, didStartAudio, didStartSound), then | |
| // either one ResultTask per mock result registered via AddMockResult() or a | |
| // single NoMatchTask, followed by didEndSound/didEndAudio/didEnd, and | |
| // finally begins draining the queue asynchronously. | |
| 122 void MockWebSpeechRecognizer::start( | |
| 123 const blink::WebSpeechRecognitionHandle& handle, | |
| 124 const blink::WebSpeechRecognitionParams& params, | |
| 125 blink::WebSpeechRecognizerClient* client) { | |
| 126 was_aborted_ = false; | |
| 127 handle_ = handle; | |
| 128 client_ = client; | |
| 129 | |
| 130 task_queue_.push_back( | |
| 131 new ClientCallTask(this, &blink::WebSpeechRecognizerClient::didStart)); | |
| 132 task_queue_.push_back(new ClientCallTask( | |
| 133 this, &blink::WebSpeechRecognizerClient::didStartAudio)); | |
| 134 task_queue_.push_back(new ClientCallTask( | |
| 135 this, &blink::WebSpeechRecognizerClient::didStartSound)); | |
| 136 | |
| 137 if (!mock_transcripts_.empty()) { | |
| 138 DCHECK_EQ(mock_transcripts_.size(), mock_confidences_.size()); | |
| 139 | |
| 140 for (size_t i = 0; i < mock_transcripts_.size(); ++i) | |
| 141 task_queue_.push_back( | |
| 142 new ResultTask(this, mock_transcripts_[i], mock_confidences_[i])); | |
| 143 | |
| // Mock results are one-shot: consume them so a later start() with no new | |
| // AddMockResult() calls falls back to the nomatch path below. | |
| 144 mock_transcripts_.clear(); | |
| 145 mock_confidences_.clear(); | |
| 146 } else | |
| 147 task_queue_.push_back(new NoMatchTask(this)); | |
| 148 | |
| 149 task_queue_.push_back( | |
| 150 new ClientCallTask(this, &blink::WebSpeechRecognizerClient::didEndSound)); | |
| 151 task_queue_.push_back( | |
| 152 new ClientCallTask(this, &blink::WebSpeechRecognizerClient::didEndAudio)); | |
| 153 task_queue_.push_back( | |
| 154 new ClientCallTask(this, &blink::WebSpeechRecognizerClient::didEnd)); | |
| 155 | |
| 156 StartTaskQueue(); | |
| 157 } | |
| 158 | |
| // stop() is intentionally unimplemented in this mock; any layout test that | |
| // reaches it trips NOTREACHED(). | |
| 159 void MockWebSpeechRecognizer::stop( | |
| 160 const blink::WebSpeechRecognitionHandle& handle, | |
| 161 blink::WebSpeechRecognizerClient* client) { | |
| 162 handle_ = handle; | |
| 163 client_ = client; | |
| 164 | |
| 165 // FIXME: Implement. | |
| 166 NOTREACHED(); | |
| 167 } | |
| 168 | |
| // Aborts the session: all pending tasks are discarded, was_aborted_ is | |
| // recorded, and only a final didEnd is delivered to the client. | |
| 169 void MockWebSpeechRecognizer::abort( | |
| 170 const blink::WebSpeechRecognitionHandle& handle, | |
| 171 blink::WebSpeechRecognizerClient* client) { | |
| 172 handle_ = handle; | |
| 173 client_ = client; | |
| 174 | |
| 175 ClearTaskQueue(); | |
| 176 was_aborted_ = true; | |
| 177 task_queue_.push_back( | |
| 178 new ClientCallTask(this, &blink::WebSpeechRecognizerClient::didEnd)); | |
| 179 StartTaskQueue(); | |
| 180 } | |
| 181 | |
| // Registers one transcript/confidence pair to be delivered as a final | |
| // result by the next call to start() (which also consumes the lists). | |
| 182 void MockWebSpeechRecognizer::AddMockResult(const blink::WebString& transcript, | |
| 183 float confidence) { | |
| 184 mock_transcripts_.push_back(transcript); | |
| 185 mock_confidences_.push_back(confidence); | |
| 186 } | |
| 187 | |
| // Maps a speech-recognition error name to its ErrorCode, then replaces any | |
| // pending tasks with an ErrorTask followed by didEnd. Unrecognized error | |
| // names are silently ignored (early return, queue untouched). | |
| 188 void MockWebSpeechRecognizer::SetError(const blink::WebString& error, | |
| 189 const blink::WebString& message) { | |
| 190 blink::WebSpeechRecognizerClient::ErrorCode code; | |
| 191 if (error == "OtherError") | |
| 192 code = blink::WebSpeechRecognizerClient::OtherError; | |
| 193 else if (error == "NoSpeechError") | |
| 194 code = blink::WebSpeechRecognizerClient::NoSpeechError; | |
| 195 else if (error == "AbortedError") | |
| 196 code = blink::WebSpeechRecognizerClient::AbortedError; | |
| 197 else if (error == "AudioCaptureError") | |
| 198 code = blink::WebSpeechRecognizerClient::AudioCaptureError; | |
| 199 else if (error == "NetworkError") | |
| 200 code = blink::WebSpeechRecognizerClient::NetworkError; | |
| 201 else if (error == "NotAllowedError") | |
| 202 code = blink::WebSpeechRecognizerClient::NotAllowedError; | |
| 203 else if (error == "ServiceNotAllowedError") | |
| 204 code = blink::WebSpeechRecognizerClient::ServiceNotAllowedError; | |
| 205 else if (error == "BadGrammarError") | |
| 206 code = blink::WebSpeechRecognizerClient::BadGrammarError; | |
| 207 else if (error == "LanguageNotSupportedError") | |
| 208 code = blink::WebSpeechRecognizerClient::LanguageNotSupportedError; | |
| 209 else | |
| 210 return; | |
| 211 | |
| 212 ClearTaskQueue(); | |
| 213 task_queue_.push_back(new ErrorTask(this, code, message)); | |
| 214 task_queue_.push_back( | |
| 215 new ClientCallTask(this, &blink::WebSpeechRecognizerClient::didEnd)); | |
| 216 StartTaskQueue(); | |
| 217 } | |
| 218 | |
| // Begins asynchronous draining of the task queue by posting a StepTask via | |
| // the delegate; no-op if a drain is already in flight. Requires that | |
| // SetDelegate() has been called (delegate_ is dereferenced here). | |
| 219 void MockWebSpeechRecognizer::StartTaskQueue() { | |
| 220 if (task_queue_running_) | |
| 221 return; | |
| 222 delegate_->PostTask(new StepTask(this)); | |
| 223 task_queue_running_ = true; | |
| 224 } | |
| 225 | |
| // Deletes every pending task (tasks are heap-allocated, owned by the | |
| // queue) and marks the queue as no longer running. | |
| 226 void MockWebSpeechRecognizer::ClearTaskQueue() { | |
| 227 while (!task_queue_.empty()) { | |
| 228 delete task_queue_.front(); | |
| 229 task_queue_.pop_front(); | |
| 230 } | |
| 231 task_queue_running_ = false; | |
| 232 } | |
| 233 | |
| // Runs exactly one queued task, then re-posts itself until the queue is | |
| // empty. The task is popped BEFORE run() so it is not double-run or | |
| // double-deleted if the callback re-enters the recognizer. | |
| 234 void MockWebSpeechRecognizer::StepTask::RunIfValid() { | |
| 235 if (object_->task_queue_.empty()) { | |
| 236 object_->task_queue_running_ = false; | |
| 237 return; | |
| 238 } | |
| 239 | |
| 240 Task* task = object_->task_queue_.front(); | |
| 241 object_->task_queue_.pop_front(); | |
| 242 task->run(); | |
| 243 delete task; | |
| 244 | |
| // Re-check emptiness: run() may have re-entrantly modified the queue | |
| // (e.g. abort() called from a client callback clears it). | |
| 245 if (object_->task_queue_.empty()) { | |
| 246 object_->task_queue_running_ = false; | |
| 247 return; | |
| 248 } | |
| 249 | |
| 250 object_->delegate_->PostTask(new StepTask(object_)); | |
| 251 } | |
| 252 | |
| 253 } // namespace content | |
| OLD | NEW |