OLD | NEW |
(Empty) | |
| 1 // Copyright 2013 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. |
| 4 |
| 5 #include "content/shell/renderer/test_runner/MockWebSpeechRecognizer.h" |
| 6 |
| 7 #include "content/shell/renderer/test_runner/WebTestDelegate.h" |
| 8 #include "third_party/WebKit/public/web/WebSpeechRecognitionResult.h" |
| 9 #include "third_party/WebKit/public/web/WebSpeechRecognizerClient.h" |
| 10 |
| 11 using namespace blink; |
| 12 using namespace std; |
| 13 |
| 14 namespace WebTestRunner { |
| 15 |
| 16 namespace { |
| 17 |
| 18 // Task class for calling a client function that does not take any parameters. |
| 19 typedef void (WebSpeechRecognizerClient::*ClientFunctionPointer)(const WebSpeechRecognitionHandle&); |
| 20 class ClientCallTask : public MockWebSpeechRecognizer::Task { |
| 21 public: |
| 22 ClientCallTask(MockWebSpeechRecognizer* mock, ClientFunctionPointer function) |
| 23 : MockWebSpeechRecognizer::Task(mock) |
| 24 , m_function(function) |
| 25 { |
| 26 } |
| 27 |
| 28 virtual void run() OVERRIDE { (m_recognizer->client()->*m_function)(m_recognizer->handle()); } |
| 29 |
| 30 private: |
| 31 ClientFunctionPointer m_function; |
| 32 }; |
| 33 |
| 34 // Task for delivering a result event. |
| 35 class ResultTask : public MockWebSpeechRecognizer::Task { |
| 36 public: |
| 37 ResultTask(MockWebSpeechRecognizer* mock, const WebString transcript, float confidence) |
| 38 : MockWebSpeechRecognizer::Task(mock) |
| 39 , m_transcript(transcript) |
| 40 , m_confidence(confidence) |
| 41 { |
| 42 } |
| 43 |
| 44 virtual void run() OVERRIDE |
| 45 { |
| 46 WebVector<WebString> transcripts(static_cast<size_t>(1)); |
| 47 WebVector<float> confidences(static_cast<size_t>(1)); |
| 48 transcripts[0] = m_transcript; |
| 49 confidences[0] = m_confidence; |
| 50 WebVector<WebSpeechRecognitionResult> finalResults(static_cast<size_t>(1)); |
| 51 WebVector<WebSpeechRecognitionResult> interimResults; |
| 52 finalResults[0].assign(transcripts, confidences, true); |
| 53 |
| 54 m_recognizer->client()->didReceiveResults(m_recognizer->handle(), finalResults, interimResults); |
| 55 } |
| 56 |
| 57 private: |
| 58 WebString m_transcript; |
| 59 float m_confidence; |
| 60 }; |
| 61 |
| 62 // Task for delivering a nomatch event. |
| 63 class NoMatchTask : public MockWebSpeechRecognizer::Task { |
| 64 public: |
| 65 NoMatchTask(MockWebSpeechRecognizer* mock) : MockWebSpeechRecognizer::Task(mock) { } |
| 66 virtual void run() OVERRIDE { m_recognizer->client()->didReceiveNoMatch(m_recognizer->handle(), WebSpeechRecognitionResult()); } |
| 67 }; |
| 68 |
| 69 // Task for delivering an error event. |
| 70 class ErrorTask : public MockWebSpeechRecognizer::Task { |
| 71 public: |
| 72 ErrorTask(MockWebSpeechRecognizer* mock, WebSpeechRecognizerClient::ErrorCode code, const WebString& message) |
| 73 : MockWebSpeechRecognizer::Task(mock) |
| 74 , m_code(code) |
| 75 , m_message(message) |
| 76 { |
| 77 } |
| 78 |
| 79 virtual void run() OVERRIDE { m_recognizer->client()->didReceiveError(m_recognizer->handle(), m_message, m_code); } |
| 80 |
| 81 private: |
| 82 WebSpeechRecognizerClient::ErrorCode m_code; |
| 83 WebString m_message; |
| 84 }; |
| 85 |
| 86 } // namespace |
| 87 |
| 88 MockWebSpeechRecognizer::MockWebSpeechRecognizer() |
| 89 : m_wasAborted(false) |
| 90 , m_taskQueueRunning(false) |
| 91 , m_delegate(0) |
| 92 { |
| 93 } |
| 94 |
| 95 MockWebSpeechRecognizer::~MockWebSpeechRecognizer() |
| 96 { |
| 97 clearTaskQueue(); |
| 98 } |
| 99 |
| 100 void MockWebSpeechRecognizer::setDelegate(WebTestDelegate* delegate) |
| 101 { |
| 102 m_delegate = delegate; |
| 103 } |
| 104 |
| 105 void MockWebSpeechRecognizer::start(const WebSpeechRecognitionHandle& handle, const WebSpeechRecognitionParams& params, WebSpeechRecognizerClient* client) |
| 106 { |
| 107 m_wasAborted = false; |
| 108 m_handle = handle; |
| 109 m_client = client; |
| 110 |
| 111 m_taskQueue.push_back(new ClientCallTask(this, &WebSpeechRecognizerClient::didStart)); |
| 112 m_taskQueue.push_back(new ClientCallTask(this, &WebSpeechRecognizerClient::didStartAudio)); |
| 113 m_taskQueue.push_back(new ClientCallTask(this, &WebSpeechRecognizerClient::didStartSound)); |
| 114 |
| 115 if (!m_mockTranscripts.empty()) { |
| 116 BLINK_ASSERT(m_mockTranscripts.size() == m_mockConfidences.size()); |
| 117 |
| 118 for (size_t i = 0; i < m_mockTranscripts.size(); ++i) |
| 119 m_taskQueue.push_back(new ResultTask(this, m_mockTranscripts[i], m_mockConfidences[i])); |
| 120 |
| 121 m_mockTranscripts.clear(); |
| 122 m_mockConfidences.clear(); |
| 123 } else |
| 124 m_taskQueue.push_back(new NoMatchTask(this)); |
| 125 |
| 126 m_taskQueue.push_back(new ClientCallTask(this, &WebSpeechRecognizerClient::didEndSound)); |
| 127 m_taskQueue.push_back(new ClientCallTask(this, &WebSpeechRecognizerClient::didEndAudio)); |
| 128 m_taskQueue.push_back(new ClientCallTask(this, &WebSpeechRecognizerClient::didEnd)); |
| 129 |
| 130 startTaskQueue(); |
| 131 } |
| 132 |
| 133 void MockWebSpeechRecognizer::stop(const WebSpeechRecognitionHandle& handle, WebSpeechRecognizerClient* client) |
| 134 { |
| 135 m_handle = handle; |
| 136 m_client = client; |
| 137 |
| 138 // FIXME: Implement. |
| 139 BLINK_ASSERT_NOT_REACHED(); |
| 140 } |
| 141 |
| 142 void MockWebSpeechRecognizer::abort(const WebSpeechRecognitionHandle& handle, WebSpeechRecognizerClient* client) |
| 143 { |
| 144 m_handle = handle; |
| 145 m_client = client; |
| 146 |
| 147 clearTaskQueue(); |
| 148 m_wasAborted = true; |
| 149 m_taskQueue.push_back(new ClientCallTask(this, &WebSpeechRecognizerClient::didEnd)); |
| 150 startTaskQueue(); |
| 151 } |
| 152 |
| 153 void MockWebSpeechRecognizer::addMockResult(const WebString& transcript, float confidence) |
| 154 { |
| 155 m_mockTranscripts.push_back(transcript); |
| 156 m_mockConfidences.push_back(confidence); |
| 157 } |
| 158 |
| 159 void MockWebSpeechRecognizer::setError(const WebString& error, const WebString& message) |
| 160 { |
| 161 WebSpeechRecognizerClient::ErrorCode code; |
| 162 if (error == "OtherError") |
| 163 code = WebSpeechRecognizerClient::OtherError; |
| 164 else if (error == "NoSpeechError") |
| 165 code = WebSpeechRecognizerClient::NoSpeechError; |
| 166 else if (error == "AbortedError") |
| 167 code = WebSpeechRecognizerClient::AbortedError; |
| 168 else if (error == "AudioCaptureError") |
| 169 code = WebSpeechRecognizerClient::AudioCaptureError; |
| 170 else if (error == "NetworkError") |
| 171 code = WebSpeechRecognizerClient::NetworkError; |
| 172 else if (error == "NotAllowedError") |
| 173 code = WebSpeechRecognizerClient::NotAllowedError; |
| 174 else if (error == "ServiceNotAllowedError") |
| 175 code = WebSpeechRecognizerClient::ServiceNotAllowedError; |
| 176 else if (error == "BadGrammarError") |
| 177 code = WebSpeechRecognizerClient::BadGrammarError; |
| 178 else if (error == "LanguageNotSupportedError") |
| 179 code = WebSpeechRecognizerClient::LanguageNotSupportedError; |
| 180 else |
| 181 return; |
| 182 |
| 183 clearTaskQueue(); |
| 184 m_taskQueue.push_back(new ErrorTask(this, code, message)); |
| 185 m_taskQueue.push_back(new ClientCallTask(this, &WebSpeechRecognizerClient::didEnd)); |
| 186 startTaskQueue(); |
| 187 } |
| 188 |
| 189 void MockWebSpeechRecognizer::startTaskQueue() |
| 190 { |
| 191 if (m_taskQueueRunning) |
| 192 return; |
| 193 m_delegate->postTask(new StepTask(this)); |
| 194 m_taskQueueRunning = true; |
| 195 } |
| 196 |
| 197 void MockWebSpeechRecognizer::clearTaskQueue() |
| 198 { |
| 199 while (!m_taskQueue.empty()) { |
| 200 delete m_taskQueue.front(); |
| 201 m_taskQueue.pop_front(); |
| 202 } |
| 203 m_taskQueueRunning = false; |
| 204 } |
| 205 |
| 206 void MockWebSpeechRecognizer::StepTask::runIfValid() |
| 207 { |
| 208 if (m_object->m_taskQueue.empty()) { |
| 209 m_object->m_taskQueueRunning = false; |
| 210 return; |
| 211 } |
| 212 |
| 213 Task* task = m_object->m_taskQueue.front(); |
| 214 m_object->m_taskQueue.pop_front(); |
| 215 task->run(); |
| 216 delete task; |
| 217 |
| 218 if (m_object->m_taskQueue.empty()) { |
| 219 m_object->m_taskQueueRunning = false; |
| 220 return; |
| 221 } |
| 222 |
| 223 m_object->m_delegate->postTask(new StepTask(m_object)); |
| 224 } |
| 225 |
| 226 } // namespace WebTestRunner |
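Note: the mock above never invokes the client synchronously. Every callback (didStart, didReceiveResults, didEnd, errors) is wrapped in a Task, pushed onto m_taskQueue, and drained one task per posted StepTask, so each callback arrives in its own message-loop turn, as a real recognizer would deliver events. The following is a minimal, self-contained sketch of that same queue-and-step pattern, using a toy event loop; the names (ToyEventLoop, ToyRecognizer) are illustrative only and are not part of the Chromium test runner.

// Standalone illustration of the queue-and-step pattern (not Chromium code).
#include <deque>
#include <functional>
#include <iostream>
#include <queue>

class ToyEventLoop {
public:
    void post(std::function<void()> task) { pending_.push(std::move(task)); }
    void run() {
        while (!pending_.empty()) {
            auto task = std::move(pending_.front());
            pending_.pop();
            task();
        }
    }
private:
    std::queue<std::function<void()>> pending_;
};

class ToyRecognizer {
public:
    explicit ToyRecognizer(ToyEventLoop* loop) : loop_(loop) {}

    // Queue the whole event sequence up front, then let the loop drain it.
    void start() {
        taskQueue_.push_back([] { std::cout << "didStart\n"; });
        taskQueue_.push_back([] { std::cout << "didReceiveResults\n"; });
        taskQueue_.push_back([] { std::cout << "didEnd\n"; });
        startTaskQueue();
    }

private:
    void startTaskQueue() {
        if (running_)
            return;
        running_ = true;
        loop_->post([this] { step(); });
    }

    // Mirrors StepTask::runIfValid(): run one task, then re-post if more remain.
    void step() {
        if (taskQueue_.empty()) {
            running_ = false;
            return;
        }
        auto task = std::move(taskQueue_.front());
        taskQueue_.pop_front();
        task();
        if (taskQueue_.empty()) {
            running_ = false;
            return;
        }
        loop_->post([this] { step(); });  // One queued task per loop turn.
    }

    ToyEventLoop* loop_;
    std::deque<std::function<void()>> taskQueue_;
    bool running_ = false;
};

int main() {
    ToyEventLoop loop;
    ToyRecognizer recognizer(&loop);
    recognizer.start();
    loop.run();  // Prints didStart, didReceiveResults, didEnd, each in its own turn.
}

Clearing the queue (as abort() and setError() do above) simply deletes any not-yet-delivered events, which is why an aborted recognition only ever reports didEnd afterwards.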