OLD | NEW |
| (Empty) |
1 // Copyright 2014 The Chromium Authors. All rights reserved. | |
2 // Use of this source code is governed by a BSD-style license that can be | |
3 // found in the LICENSE file. | |
4 | |
5 #include "components/test_runner/mock_web_speech_recognizer.h" | |
6 | |
7 #include <stddef.h> | |
8 | |
9 #include "base/bind.h" | |
10 #include "base/bind_helpers.h" | |
11 #include "base/logging.h" | |
12 #include "base/macros.h" | |
13 #include "components/test_runner/web_test_delegate.h" | |
14 #include "third_party/WebKit/public/web/WebSpeechRecognitionResult.h" | |
15 #include "third_party/WebKit/public/web/WebSpeechRecognizerClient.h" | |
16 | |
17 namespace test_runner { | |
18 | |
19 namespace { | |
20 | |
21 // Task class for calling a client function that does not take any parameters. | |
22 typedef void (blink::WebSpeechRecognizerClient::*ClientFunctionPointer)( | |
23 const blink::WebSpeechRecognitionHandle&); | |
24 class ClientCallTask : public MockWebSpeechRecognizer::Task { | |
25 public: | |
26 ClientCallTask(MockWebSpeechRecognizer* mock, ClientFunctionPointer function) | |
27 : MockWebSpeechRecognizer::Task(mock), function_(function) {} | |
28 | |
29 ~ClientCallTask() override {} | |
30 | |
31 void run() override { | |
32 (recognizer_->Client()->*function_)(recognizer_->Handle()); | |
33 } | |
34 | |
35 private: | |
36 ClientFunctionPointer function_; | |
37 | |
38 DISALLOW_COPY_AND_ASSIGN(ClientCallTask); | |
39 }; | |
40 | |
41 // Task for delivering a result event. | |
42 class ResultTask : public MockWebSpeechRecognizer::Task { | |
43 public: | |
44 ResultTask(MockWebSpeechRecognizer* mock, | |
45 const blink::WebString transcript, | |
46 float confidence) | |
47 : MockWebSpeechRecognizer::Task(mock), | |
48 transcript_(transcript), | |
49 confidence_(confidence) {} | |
50 | |
51 ~ResultTask() override {} | |
52 | |
53 void run() override { | |
54 blink::WebVector<blink::WebString> transcripts(static_cast<size_t>(1)); | |
55 blink::WebVector<float> confidences(static_cast<size_t>(1)); | |
56 transcripts[0] = transcript_; | |
57 confidences[0] = confidence_; | |
58 blink::WebVector<blink::WebSpeechRecognitionResult> final_results( | |
59 static_cast<size_t>(1)); | |
60 blink::WebVector<blink::WebSpeechRecognitionResult> interim_results; | |
61 final_results[0].assign(transcripts, confidences, true); | |
62 | |
63 recognizer_->Client()->didReceiveResults( | |
64 recognizer_->Handle(), final_results, interim_results); | |
65 } | |
66 | |
67 private: | |
68 blink::WebString transcript_; | |
69 float confidence_; | |
70 | |
71 DISALLOW_COPY_AND_ASSIGN(ResultTask); | |
72 }; | |
73 | |
74 // Task for delivering a nomatch event. | |
75 class NoMatchTask : public MockWebSpeechRecognizer::Task { | |
76 public: | |
77 NoMatchTask(MockWebSpeechRecognizer* mock) | |
78 : MockWebSpeechRecognizer::Task(mock) {} | |
79 | |
80 ~NoMatchTask() override {} | |
81 | |
82 void run() override { | |
83 recognizer_->Client()->didReceiveNoMatch( | |
84 recognizer_->Handle(), blink::WebSpeechRecognitionResult()); | |
85 } | |
86 | |
87 private: | |
88 DISALLOW_COPY_AND_ASSIGN(NoMatchTask); | |
89 }; | |
90 | |
91 // Task for delivering an error event. | |
92 class ErrorTask : public MockWebSpeechRecognizer::Task { | |
93 public: | |
94 ErrorTask(MockWebSpeechRecognizer* mock, | |
95 blink::WebSpeechRecognizerClient::ErrorCode code, | |
96 const blink::WebString& message) | |
97 : MockWebSpeechRecognizer::Task(mock), code_(code), message_(message) {} | |
98 | |
99 ~ErrorTask() override {} | |
100 | |
101 void run() override { | |
102 recognizer_->Client()->didReceiveError( | |
103 recognizer_->Handle(), message_, code_); | |
104 } | |
105 | |
106 private: | |
107 blink::WebSpeechRecognizerClient::ErrorCode code_; | |
108 blink::WebString message_; | |
109 | |
110 DISALLOW_COPY_AND_ASSIGN(ErrorTask); | |
111 }; | |
112 | |
113 // Task for tidying up after recognition task has ended. | |
114 class EndedTask : public MockWebSpeechRecognizer::Task { | |
115 public: | |
116 EndedTask(MockWebSpeechRecognizer* mock) | |
117 : MockWebSpeechRecognizer::Task(mock) {} | |
118 | |
119 ~EndedTask() override {} | |
120 | |
121 void run() override { | |
122 blink::WebSpeechRecognitionHandle handle = recognizer_->Handle(); | |
123 blink::WebSpeechRecognizerClient* client = recognizer_->Client(); | |
124 recognizer_->SetClientContext(blink::WebSpeechRecognitionHandle(), nullptr); | |
125 client->didEnd(handle); | |
126 } | |
127 | |
128 private: | |
129 DISALLOW_COPY_AND_ASSIGN(EndedTask); | |
130 }; | |
131 | |
132 // Task for switching processing to the next (handle, client) pairing. | |
133 class SwitchClientHandleTask : public MockWebSpeechRecognizer::Task { | |
134 public: | |
135 SwitchClientHandleTask(MockWebSpeechRecognizer* mock, | |
136 const blink::WebSpeechRecognitionHandle& handle, | |
137 blink::WebSpeechRecognizerClient* client) | |
138 : MockWebSpeechRecognizer::Task(mock), handle_(handle), client_(client) {} | |
139 | |
140 ~SwitchClientHandleTask() override {} | |
141 | |
142 bool isNewContextTask() const override { return true; } | |
143 | |
144 void run() override { recognizer_->SetClientContext(handle_, client_); } | |
145 | |
146 private: | |
147 const blink::WebSpeechRecognitionHandle handle_; | |
148 blink::WebSpeechRecognizerClient* client_; | |
149 | |
150 DISALLOW_COPY_AND_ASSIGN(SwitchClientHandleTask); | |
151 }; | |
152 | |
153 } // namespace | |
154 | |
155 MockWebSpeechRecognizer::MockWebSpeechRecognizer() | |
156 : client_(nullptr), | |
157 was_aborted_(false), | |
158 task_queue_running_(false), | |
159 delegate_(0), | |
160 weak_factory_(this) {} | |
161 | |
162 MockWebSpeechRecognizer::~MockWebSpeechRecognizer() { | |
163 ClearTaskQueue(); | |
164 } | |
165 | |
// Default implementation: ordinary tasks are not context switches, so
// ClearTaskQueue() is free to discard them.
bool MockWebSpeechRecognizer::Task::isNewContextTask() const {
  return false;
}
169 | |
// Sets the delegate used to post asynchronous tasks. Must be set (non-null)
// before any recognition events are scheduled; PostRunTaskFromQueue()
// dereferences it unconditionally.
void MockWebSpeechRecognizer::SetDelegate(WebTestDelegate* delegate) {
  delegate_ = delegate;
}
173 | |
// Rebinds the recognizer to a new (handle, client) pair. Passing a
// default-constructed handle with a null client (see EndedTask) clears the
// active context.
void MockWebSpeechRecognizer::SetClientContext(
    const blink::WebSpeechRecognitionHandle& handle,
    blink::WebSpeechRecognizerClient* client) {
  handle_ = handle;
  client_ = client;
}
180 | |
// Simulates a full recognition session for |handle|/|client|: binds (or
// queues a switch to) the context, then queues the canonical event sequence
// didStart -> didStartAudio -> didStartSound -> results-or-nomatch ->
// didEndSound -> didEndAudio -> didEnd, and kicks off async processing.
void MockWebSpeechRecognizer::start(
    const blink::WebSpeechRecognitionHandle& handle,
    const blink::WebSpeechRecognitionParams& params,
    blink::WebSpeechRecognizerClient* client) {
  was_aborted_ = false;
  // If no context is active and none is already queued, bind immediately;
  // otherwise the rebind must be sequenced behind earlier queued tasks.
  if (!client_ && !HasPendingNewContextTasks()) {
    handle_ = handle;
    client_ = client;
  } else {
    task_queue_.push_back(new SwitchClientHandleTask(this, handle, client));
  }

  task_queue_.push_back(
      new ClientCallTask(this, &blink::WebSpeechRecognizerClient::didStart));
  task_queue_.push_back(new ClientCallTask(
      this, &blink::WebSpeechRecognizerClient::didStartAudio));
  task_queue_.push_back(new ClientCallTask(
      this, &blink::WebSpeechRecognizerClient::didStartSound));

  if (!mock_transcripts_.empty()) {
    DCHECK_EQ(mock_transcripts_.size(), mock_confidences_.size());

    // Deliver one result event per registered mock transcript, then consume
    // the registered results so the next start() begins fresh.
    for (size_t i = 0; i < mock_transcripts_.size(); ++i)
      task_queue_.push_back(
          new ResultTask(this, mock_transcripts_[i], mock_confidences_[i]));

    mock_transcripts_.clear();
    mock_confidences_.clear();
  } else
    task_queue_.push_back(new NoMatchTask(this));

  task_queue_.push_back(
      new ClientCallTask(this, &blink::WebSpeechRecognizerClient::didEndSound));
  task_queue_.push_back(
      new ClientCallTask(this, &blink::WebSpeechRecognizerClient::didEndAudio));
  task_queue_.push_back(new EndedTask(this));

  StartTaskQueue();
}
220 | |
// Not implemented by the mock; hitting this indicates a layout test
// exercised stop(), which is currently unsupported.
void MockWebSpeechRecognizer::stop(
    const blink::WebSpeechRecognitionHandle& handle,
    blink::WebSpeechRecognizerClient* client) {
  SetClientContext(handle, client);

  // FIXME: Implement.
  NOTREACHED();
}
229 | |
// Cancels the in-flight session: discards cancellable queued tasks, then
// queues a rebind to |handle|/|client| followed by the final didEnd()
// notification.
void MockWebSpeechRecognizer::abort(
    const blink::WebSpeechRecognitionHandle& handle,
    blink::WebSpeechRecognizerClient* client) {
  was_aborted_ = true;
  // ClearTaskQueue() keeps any already-queued context-switch tasks.
  ClearTaskQueue();
  task_queue_.push_back(new SwitchClientHandleTask(this, handle, client));
  task_queue_.push_back(new EndedTask(this));

  StartTaskQueue();
}
240 | |
// Registers a (transcript, confidence) pair to be delivered as a final
// result by the next start(); start() consumes and clears these lists.
void MockWebSpeechRecognizer::AddMockResult(const blink::WebString& transcript,
                                            float confidence) {
  mock_transcripts_.push_back(transcript);
  mock_confidences_.push_back(confidence);
}
246 | |
247 void MockWebSpeechRecognizer::SetError(const blink::WebString& error, | |
248 const blink::WebString& message) { | |
249 blink::WebSpeechRecognizerClient::ErrorCode code; | |
250 if (error == "OtherError") | |
251 code = blink::WebSpeechRecognizerClient::OtherError; | |
252 else if (error == "NoSpeechError") | |
253 code = blink::WebSpeechRecognizerClient::NoSpeechError; | |
254 else if (error == "AbortedError") | |
255 code = blink::WebSpeechRecognizerClient::AbortedError; | |
256 else if (error == "AudioCaptureError") | |
257 code = blink::WebSpeechRecognizerClient::AudioCaptureError; | |
258 else if (error == "NetworkError") | |
259 code = blink::WebSpeechRecognizerClient::NetworkError; | |
260 else if (error == "NotAllowedError") | |
261 code = blink::WebSpeechRecognizerClient::NotAllowedError; | |
262 else if (error == "ServiceNotAllowedError") | |
263 code = blink::WebSpeechRecognizerClient::ServiceNotAllowedError; | |
264 else if (error == "BadGrammarError") | |
265 code = blink::WebSpeechRecognizerClient::BadGrammarError; | |
266 else if (error == "LanguageNotSupportedError") | |
267 code = blink::WebSpeechRecognizerClient::LanguageNotSupportedError; | |
268 else | |
269 return; | |
270 | |
271 ClearTaskQueue(); | |
272 task_queue_.push_back(new ErrorTask(this, code, message)); | |
273 task_queue_.push_back(new EndedTask(this)); | |
274 | |
275 StartTaskQueue(); | |
276 } | |
277 | |
278 void MockWebSpeechRecognizer::StartTaskQueue() { | |
279 if (task_queue_running_) | |
280 return; | |
281 PostRunTaskFromQueue(); | |
282 } | |
283 | |
284 void MockWebSpeechRecognizer::ClearTaskQueue() { | |
285 while (!task_queue_.empty()) { | |
286 Task* task = task_queue_.front(); | |
287 if (task->isNewContextTask()) | |
288 break; | |
289 delete task_queue_.front(); | |
290 task_queue_.pop_front(); | |
291 } | |
292 if (task_queue_.empty()) | |
293 task_queue_running_ = false; | |
294 } | |
295 | |
// Schedules RunTaskFromQueue() via the delegate. The weak pointer keeps the
// posted callback from firing after this mock is destroyed.
void MockWebSpeechRecognizer::PostRunTaskFromQueue() {
  task_queue_running_ = true;
  delegate_->PostTask(base::Bind(&MockWebSpeechRecognizer::RunTaskFromQueue,
                                 weak_factory_.GetWeakPtr()));
}
301 | |
// Pops and runs exactly one task. Running a task may mutate the queue
// (e.g. SetError() clears it and queues new tasks), so the queue is
// re-checked after execution before scheduling the next iteration.
void MockWebSpeechRecognizer::RunTaskFromQueue() {
  if (task_queue_.empty()) {
    task_queue_running_ = false;
    return;
  }

  MockWebSpeechRecognizer::Task* task = task_queue_.front();
  task_queue_.pop_front();
  task->run();
  delete task;

  // The task we just ran may have emptied the queue; if so, stop draining.
  if (task_queue_.empty()) {
    task_queue_running_ = false;
    return;
  }

  PostRunTaskFromQueue();
}
320 | |
321 bool MockWebSpeechRecognizer::HasPendingNewContextTasks() const { | |
322 for (const auto& task : task_queue_) { | |
323 if (task->isNewContextTask()) | |
324 return true; | |
325 } | |
326 return false; | |
327 } | |
328 | |
329 } // namespace test_runner | |
OLD | NEW |