| OLD | NEW |
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include <queue> | 5 #include <queue> |
| 6 | 6 |
| 7 #include "base/memory/scoped_ptr.h" | 7 #include "base/memory/scoped_ptr.h" |
| 8 #include "base/message_loop.h" | 8 #include "base/message_loop.h" |
| 9 #include "base/utf_string_conversions.h" | 9 #include "base/utf_string_conversions.h" |
| 10 #include "content/browser/speech/audio_buffer.h" | 10 #include "content/browser/speech/audio_buffer.h" |
| (...skipping 20 matching lines...) Expand all Loading... |
| 31 public: | 31 public: |
| 32 GoogleStreamingRemoteEngineTest() | 32 GoogleStreamingRemoteEngineTest() |
| 33 : last_number_of_upstream_chunks_seen_(0U), | 33 : last_number_of_upstream_chunks_seen_(0U), |
| 34 error_(SPEECH_RECOGNITION_ERROR_NONE) { } | 34 error_(SPEECH_RECOGNITION_ERROR_NONE) { } |
| 35 | 35 |
| 36 // Creates a speech recognition request and invokes its URL fetcher delegate | 36 // Creates a speech recognition request and invokes its URL fetcher delegate |
| 37 // with the given test data. | 37 // with the given test data. |
| 38 void CreateAndTestRequest(bool success, const std::string& http_response); | 38 void CreateAndTestRequest(bool success, const std::string& http_response); |
| 39 | 39 |
| 40 // SpeechRecognitionRequestDelegate methods. | 40 // SpeechRecognitionRequestDelegate methods. |
| 41 virtual void OnSpeechRecognitionEngineResult( | 41 virtual void OnSpeechRecognitionEngineResults( |
| 42 const SpeechRecognitionResult& result) OVERRIDE { | 42 const SpeechRecognitionResults& results) OVERRIDE { |
| 43 results_.push(result); | 43 results_.push(results); |
| 44 } | 44 } |
| 45 virtual void OnSpeechRecognitionEngineError( | 45 virtual void OnSpeechRecognitionEngineError( |
| 46 const SpeechRecognitionError& error) OVERRIDE { | 46 const SpeechRecognitionError& error) OVERRIDE { |
| 47 error_ = error.code; | 47 error_ = error.code; |
| 48 } | 48 } |
| 49 | 49 |
| 50 // testing::Test methods. | 50 // testing::Test methods. |
| 51 virtual void SetUp() OVERRIDE; | 51 virtual void SetUp() OVERRIDE; |
| 52 virtual void TearDown() OVERRIDE; | 52 virtual void TearDown() OVERRIDE; |
| 53 | 53 |
| 54 protected: | 54 protected: |
| 55 enum DownstreamError { | 55 enum DownstreamError { |
| 56 DOWNSTREAM_ERROR_NONE, | 56 DOWNSTREAM_ERROR_NONE, |
| 57 DOWNSTREAM_ERROR_HTTP500, | 57 DOWNSTREAM_ERROR_HTTP500, |
| 58 DOWNSTREAM_ERROR_NETWORK, | 58 DOWNSTREAM_ERROR_NETWORK, |
| 59 DOWNSTREAM_ERROR_WEBSERVICE_NO_MATCH | 59 DOWNSTREAM_ERROR_WEBSERVICE_NO_MATCH |
| 60 }; | 60 }; |
| 61 static bool ResultsAreEqual(const SpeechRecognitionResult& a, | 61 static bool ResultsAreEqual(const SpeechRecognitionResults& a, |
| 62 const SpeechRecognitionResult& b); | 62 const SpeechRecognitionResults& b); |
| 63 static std::string SerializeProtobufResponse( | 63 static std::string SerializeProtobufResponse( |
| 64 const proto::SpeechRecognitionEvent& msg); | 64 const proto::SpeechRecognitionEvent& msg); |
| 65 static std::string ToBigEndian32(uint32 value); | 65 static std::string ToBigEndian32(uint32 value); |
| 66 | 66 |
| 67 TestURLFetcher* GetUpstreamFetcher(); | 67 TestURLFetcher* GetUpstreamFetcher(); |
| 68 TestURLFetcher* GetDownstreamFetcher(); | 68 TestURLFetcher* GetDownstreamFetcher(); |
| 69 void StartMockRecognition(); | 69 void StartMockRecognition(); |
| 70 void EndMockRecognition(); | 70 void EndMockRecognition(); |
| 71 void InjectDummyAudioChunk(); | 71 void InjectDummyAudioChunk(); |
| 72 size_t UpstreamChunksUploadedFromLastCall(); | 72 size_t UpstreamChunksUploadedFromLastCall(); |
| 73 void ProvideMockProtoResultDownstream( | 73 void ProvideMockProtoResultDownstream( |
| 74 const proto::SpeechRecognitionEvent& result); | 74 const proto::SpeechRecognitionEvent& result); |
| 75 void ProvideMockResultDownstream(const SpeechRecognitionResult& result); | 75 void ProvideMockResultDownstream(const SpeechRecognitionResult& result); |
| 76 void ExpectResultReceived(const SpeechRecognitionResult& result); | 76 void ExpectResultsReceived(const SpeechRecognitionResults& results); |
| 77 void CloseMockDownstream(DownstreamError error); | 77 void CloseMockDownstream(DownstreamError error); |
| 78 | 78 |
| 79 scoped_ptr<GoogleStreamingRemoteEngine> engine_under_test_; | 79 scoped_ptr<GoogleStreamingRemoteEngine> engine_under_test_; |
| 80 TestURLFetcherFactory url_fetcher_factory_; | 80 TestURLFetcherFactory url_fetcher_factory_; |
| 81 size_t last_number_of_upstream_chunks_seen_; | 81 size_t last_number_of_upstream_chunks_seen_; |
| 82 MessageLoop message_loop_; | 82 MessageLoop message_loop_; |
| 83 std::string response_buffer_; | 83 std::string response_buffer_; |
| 84 SpeechRecognitionErrorCode error_; | 84 SpeechRecognitionErrorCode error_; |
| 85 std::queue<SpeechRecognitionResult> results_; | 85 std::queue<SpeechRecognitionResults> results_; |
| 86 }; | 86 }; |
| 87 | 87 |
| 88 TEST_F(GoogleStreamingRemoteEngineTest, SingleDefinitiveResult) { | 88 TEST_F(GoogleStreamingRemoteEngineTest, SingleDefinitiveResult) { |
| 89 StartMockRecognition(); | 89 StartMockRecognition(); |
| 90 ASSERT_TRUE(GetUpstreamFetcher()); | 90 ASSERT_TRUE(GetUpstreamFetcher()); |
| 91 ASSERT_EQ(0U, UpstreamChunksUploadedFromLastCall()); | 91 ASSERT_EQ(0U, UpstreamChunksUploadedFromLastCall()); |
| 92 | 92 |
| 93 // Inject some dummy audio chunks and check a corresponding chunked upload | 93 // Inject some dummy audio chunks and check a corresponding chunked upload |
| 94 // is performed every time on the server. | 94 // is performed every time on the server. |
| 95 for (int i = 0; i < 3; ++i) { | 95 for (int i = 0; i < 3; ++i) { |
| 96 InjectDummyAudioChunk(); | 96 InjectDummyAudioChunk(); |
| 97 ASSERT_EQ(1U, UpstreamChunksUploadedFromLastCall()); | 97 ASSERT_EQ(1U, UpstreamChunksUploadedFromLastCall()); |
| 98 } | 98 } |
| 99 | 99 |
| 100 // Ensure that a final (empty) audio chunk is uploaded on chunks end. | 100 // Ensure that a final (empty) audio chunk is uploaded on chunks end. |
| 101 engine_under_test_->AudioChunksEnded(); | 101 engine_under_test_->AudioChunksEnded(); |
| 102 ASSERT_EQ(1U, UpstreamChunksUploadedFromLastCall()); | 102 ASSERT_EQ(1U, UpstreamChunksUploadedFromLastCall()); |
| 103 ASSERT_TRUE(engine_under_test_->IsRecognitionPending()); | 103 ASSERT_TRUE(engine_under_test_->IsRecognitionPending()); |
| 104 | 104 |
| 105 // Simulate a protobuf message streamed from the server containing a single | 105 // Simulate a protobuf message streamed from the server containing a single |
| 106 // result with two hypotheses. | 106 // result with two hypotheses. |
| 107 SpeechRecognitionResult result; | 107 SpeechRecognitionResults results; |
| 108 results.push_back(SpeechRecognitionResult()); |
| 109 SpeechRecognitionResult& result = results.back(); |
| 108 result.is_provisional = false; | 110 result.is_provisional = false; |
| 109 result.hypotheses.push_back( | 111 result.hypotheses.push_back( |
| 110 SpeechRecognitionHypothesis(UTF8ToUTF16("hypothesis 1"), 0.1F)); | 112 SpeechRecognitionHypothesis(UTF8ToUTF16("hypothesis 1"), 0.1F)); |
| 111 result.hypotheses.push_back( | 113 result.hypotheses.push_back( |
| 112 SpeechRecognitionHypothesis(UTF8ToUTF16("hypothesis 2"), 0.2F)); | 114 SpeechRecognitionHypothesis(UTF8ToUTF16("hypothesis 2"), 0.2F)); |
| 113 | 115 |
| 114 ProvideMockResultDownstream(result); | 116 ProvideMockResultDownstream(result); |
| 115 ExpectResultReceived(result); | 117 ExpectResultsReceived(results); |
| 116 ASSERT_TRUE(engine_under_test_->IsRecognitionPending()); | 118 ASSERT_TRUE(engine_under_test_->IsRecognitionPending()); |
| 117 | 119 |
| 118 // Ensure everything is closed cleanly after the downstream is closed. | 120 // Ensure everything is closed cleanly after the downstream is closed. |
| 119 CloseMockDownstream(DOWNSTREAM_ERROR_NONE); | 121 CloseMockDownstream(DOWNSTREAM_ERROR_NONE); |
| 120 ASSERT_FALSE(engine_under_test_->IsRecognitionPending()); | 122 ASSERT_FALSE(engine_under_test_->IsRecognitionPending()); |
| 121 EndMockRecognition(); | 123 EndMockRecognition(); |
| 122 ASSERT_EQ(SPEECH_RECOGNITION_ERROR_NONE, error_); | 124 ASSERT_EQ(SPEECH_RECOGNITION_ERROR_NONE, error_); |
| 123 ASSERT_EQ(0U, results_.size()); | 125 ASSERT_EQ(0U, results_.size()); |
| 124 } | 126 } |
| 125 | 127 |
| 126 TEST_F(GoogleStreamingRemoteEngineTest, SeveralStreamingResults) { | 128 TEST_F(GoogleStreamingRemoteEngineTest, SeveralStreamingResults) { |
| 127 StartMockRecognition(); | 129 StartMockRecognition(); |
| 128 ASSERT_TRUE(GetUpstreamFetcher()); | 130 ASSERT_TRUE(GetUpstreamFetcher()); |
| 129 ASSERT_EQ(0U, UpstreamChunksUploadedFromLastCall()); | 131 ASSERT_EQ(0U, UpstreamChunksUploadedFromLastCall()); |
| 130 | 132 |
| 131 for (int i = 0; i < 4; ++i) { | 133 for (int i = 0; i < 4; ++i) { |
| 132 InjectDummyAudioChunk(); | 134 InjectDummyAudioChunk(); |
| 133 ASSERT_EQ(1U, UpstreamChunksUploadedFromLastCall()); | 135 ASSERT_EQ(1U, UpstreamChunksUploadedFromLastCall()); |
| 134 | 136 |
| 135 SpeechRecognitionResult result; | 137 SpeechRecognitionResults results; |
| 138 results.push_back(SpeechRecognitionResult()); |
| 139 SpeechRecognitionResult& result = results.back(); |
| 136 result.is_provisional = (i % 2 == 0); // Alternate result types. | 140 result.is_provisional = (i % 2 == 0); // Alternate result types. |
| 137 float confidence = result.is_provisional ? 0.0F : (i * 0.1F); | 141 float confidence = result.is_provisional ? 0.0F : (i * 0.1F); |
| 138 result.hypotheses.push_back( | 142 result.hypotheses.push_back( |
| 139 SpeechRecognitionHypothesis(UTF8ToUTF16("hypothesis"), confidence)); | 143 SpeechRecognitionHypothesis(UTF8ToUTF16("hypothesis"), confidence)); |
| 140 | 144 |
| 141 ProvideMockResultDownstream(result); | 145 ProvideMockResultDownstream(result); |
| 142 ExpectResultReceived(result); | 146 ExpectResultsReceived(results); |
| 143 ASSERT_TRUE(engine_under_test_->IsRecognitionPending()); | 147 ASSERT_TRUE(engine_under_test_->IsRecognitionPending()); |
| 144 } | 148 } |
| 145 | 149 |
| 146 // Ensure that a final (empty) audio chunk is uploaded on chunks end. | 150 // Ensure that a final (empty) audio chunk is uploaded on chunks end. |
| 147 engine_under_test_->AudioChunksEnded(); | 151 engine_under_test_->AudioChunksEnded(); |
| 148 ASSERT_EQ(1U, UpstreamChunksUploadedFromLastCall()); | 152 ASSERT_EQ(1U, UpstreamChunksUploadedFromLastCall()); |
| 149 ASSERT_TRUE(engine_under_test_->IsRecognitionPending()); | 153 ASSERT_TRUE(engine_under_test_->IsRecognitionPending()); |
| 150 | 154 |
| 151 // Simulate a final definitive result. | 155 // Simulate a final definitive result. |
| 152 SpeechRecognitionResult result; | 156 SpeechRecognitionResults results; |
| 157 results.push_back(SpeechRecognitionResult()); |
| 158 SpeechRecognitionResult& result = results.back(); |
| 153 result.is_provisional = false; | 159 result.is_provisional = false; |
| 154 result.hypotheses.push_back( | 160 result.hypotheses.push_back( |
| 155 SpeechRecognitionHypothesis(UTF8ToUTF16("The final result"), 1.0F)); | 161 SpeechRecognitionHypothesis(UTF8ToUTF16("The final result"), 1.0F)); |
| 156 ProvideMockResultDownstream(result); | 162 ProvideMockResultDownstream(result); |
| 157 ExpectResultReceived(result); | 163 ExpectResultsReceived(results); |
| 158 ASSERT_TRUE(engine_under_test_->IsRecognitionPending()); | 164 ASSERT_TRUE(engine_under_test_->IsRecognitionPending()); |
| 159 | 165 |
| 160 // Ensure everything is closed cleanly after the downstream is closed. | 166 // Ensure everything is closed cleanly after the downstream is closed. |
| 161 CloseMockDownstream(DOWNSTREAM_ERROR_NONE); | 167 CloseMockDownstream(DOWNSTREAM_ERROR_NONE); |
| 162 ASSERT_FALSE(engine_under_test_->IsRecognitionPending()); | 168 ASSERT_FALSE(engine_under_test_->IsRecognitionPending()); |
| 163 EndMockRecognition(); | 169 EndMockRecognition(); |
| 164 ASSERT_EQ(SPEECH_RECOGNITION_ERROR_NONE, error_); | 170 ASSERT_EQ(SPEECH_RECOGNITION_ERROR_NONE, error_); |
| 165 ASSERT_EQ(0U, results_.size()); | 171 ASSERT_EQ(0U, results_.size()); |
| 166 } | 172 } |
| 167 | 173 |
| 168 TEST_F(GoogleStreamingRemoteEngineTest, NoFinalResultAfterAudioChunksEnded) { | 174 TEST_F(GoogleStreamingRemoteEngineTest, NoFinalResultAfterAudioChunksEnded) { |
| 169 StartMockRecognition(); | 175 StartMockRecognition(); |
| 170 ASSERT_TRUE(GetUpstreamFetcher()); | 176 ASSERT_TRUE(GetUpstreamFetcher()); |
| 171 ASSERT_EQ(0U, UpstreamChunksUploadedFromLastCall()); | 177 ASSERT_EQ(0U, UpstreamChunksUploadedFromLastCall()); |
| 172 | 178 |
| 173 // Simulate one pushed audio chunk. | 179 // Simulate one pushed audio chunk. |
| 174 InjectDummyAudioChunk(); | 180 InjectDummyAudioChunk(); |
| 175 ASSERT_EQ(1U, UpstreamChunksUploadedFromLastCall()); | 181 ASSERT_EQ(1U, UpstreamChunksUploadedFromLastCall()); |
| 176 | 182 |
| 177 // Simulate the corresponding definitive result. | 183 // Simulate the corresponding definitive result. |
| 178 SpeechRecognitionResult result; | 184 SpeechRecognitionResults results; |
| 185 results.push_back(SpeechRecognitionResult()); |
| 186 SpeechRecognitionResult& result = results.back(); |
| 179 result.hypotheses.push_back( | 187 result.hypotheses.push_back( |
| 180 SpeechRecognitionHypothesis(UTF8ToUTF16("hypothesis"), 1.0F)); | 188 SpeechRecognitionHypothesis(UTF8ToUTF16("hypothesis"), 1.0F)); |
| 181 ProvideMockResultDownstream(result); | 189 ProvideMockResultDownstream(result); |
| 182 ExpectResultReceived(result); | 190 ExpectResultsReceived(results); |
| 183 ASSERT_TRUE(engine_under_test_->IsRecognitionPending()); | 191 ASSERT_TRUE(engine_under_test_->IsRecognitionPending()); |
| 184 | 192 |
| 185 // Simulate a silent downstream closure after |AudioChunksEnded|. | 193 // Simulate a silent downstream closure after |AudioChunksEnded|. |
| 186 engine_under_test_->AudioChunksEnded(); | 194 engine_under_test_->AudioChunksEnded(); |
| 187 ASSERT_EQ(1U, UpstreamChunksUploadedFromLastCall()); | 195 ASSERT_EQ(1U, UpstreamChunksUploadedFromLastCall()); |
| 188 ASSERT_TRUE(engine_under_test_->IsRecognitionPending()); | 196 ASSERT_TRUE(engine_under_test_->IsRecognitionPending()); |
| 189 CloseMockDownstream(DOWNSTREAM_ERROR_NONE); | 197 CloseMockDownstream(DOWNSTREAM_ERROR_NONE); |
| 190 | 198 |
| 191 // Expect an empty result, aimed at notifying recognition ended with no | 199 // Expect an empty result, aimed at notifying recognition ended with no |
| 192 // actual results nor errors. | 200 // actual results nor errors. |
| 193 SpeechRecognitionResult empty_result; | 201 SpeechRecognitionResults empty_results; |
| 194 ExpectResultReceived(empty_result); | 202 ExpectResultsReceived(empty_results); |
| 195 | 203 |
| 196 // Ensure everything is closed cleanly after the downstream is closed. | 204 // Ensure everything is closed cleanly after the downstream is closed. |
| 197 ASSERT_FALSE(engine_under_test_->IsRecognitionPending()); | 205 ASSERT_FALSE(engine_under_test_->IsRecognitionPending()); |
| 198 EndMockRecognition(); | 206 EndMockRecognition(); |
| 199 ASSERT_EQ(SPEECH_RECOGNITION_ERROR_NONE, error_); | 207 ASSERT_EQ(SPEECH_RECOGNITION_ERROR_NONE, error_); |
| 200 ASSERT_EQ(0U, results_.size()); | 208 ASSERT_EQ(0U, results_.size()); |
| 201 } | 209 } |
| 202 | 210 |
| 203 TEST_F(GoogleStreamingRemoteEngineTest, NoMatchError) { | 211 TEST_F(GoogleStreamingRemoteEngineTest, NoMatchError) { |
| 204 StartMockRecognition(); | 212 StartMockRecognition(); |
| 205 ASSERT_TRUE(GetUpstreamFetcher()); | 213 ASSERT_TRUE(GetUpstreamFetcher()); |
| 206 ASSERT_EQ(0U, UpstreamChunksUploadedFromLastCall()); | 214 ASSERT_EQ(0U, UpstreamChunksUploadedFromLastCall()); |
| 207 | 215 |
| 208 for (int i = 0; i < 3; ++i) | 216 for (int i = 0; i < 3; ++i) |
| 209 InjectDummyAudioChunk(); | 217 InjectDummyAudioChunk(); |
| 210 engine_under_test_->AudioChunksEnded(); | 218 engine_under_test_->AudioChunksEnded(); |
| 211 ASSERT_EQ(4U, UpstreamChunksUploadedFromLastCall()); | 219 ASSERT_EQ(4U, UpstreamChunksUploadedFromLastCall()); |
| 212 ASSERT_TRUE(engine_under_test_->IsRecognitionPending()); | 220 ASSERT_TRUE(engine_under_test_->IsRecognitionPending()); |
| 213 | 221 |
| 214 // Simulate only a provisional result. | 222 // Simulate only a provisional result. |
| 215 SpeechRecognitionResult result; | 223 SpeechRecognitionResults results; |
| 224 results.push_back(SpeechRecognitionResult()); |
| 225 SpeechRecognitionResult& result = results.back(); |
| 216 result.is_provisional = true; | 226 result.is_provisional = true; |
| 217 result.hypotheses.push_back( | 227 result.hypotheses.push_back( |
| 218 SpeechRecognitionHypothesis(UTF8ToUTF16("The final result"), 0.0F)); | 228 SpeechRecognitionHypothesis(UTF8ToUTF16("The final result"), 0.0F)); |
| 219 ProvideMockResultDownstream(result); | 229 ProvideMockResultDownstream(result); |
| 220 ExpectResultReceived(result); | 230 ExpectResultsReceived(results); |
| 221 ASSERT_TRUE(engine_under_test_->IsRecognitionPending()); | 231 ASSERT_TRUE(engine_under_test_->IsRecognitionPending()); |
| 222 | 232 |
| 223 CloseMockDownstream(DOWNSTREAM_ERROR_WEBSERVICE_NO_MATCH); | 233 CloseMockDownstream(DOWNSTREAM_ERROR_WEBSERVICE_NO_MATCH); |
| 224 | 234 |
| 225 // Expect an empty result. | 235 // Expect an empty result. |
| 226 ASSERT_FALSE(engine_under_test_->IsRecognitionPending()); | 236 ASSERT_FALSE(engine_under_test_->IsRecognitionPending()); |
| 227 EndMockRecognition(); | 237 EndMockRecognition(); |
| 228 SpeechRecognitionResult empty_result; | 238 SpeechRecognitionResults empty_results; |
| 229 ExpectResultReceived(empty_result); | 239 ExpectResultsReceived(empty_results); |
| 230 } | 240 } |
| 231 | 241 |
| 232 TEST_F(GoogleStreamingRemoteEngineTest, HTTPError) { | 242 TEST_F(GoogleStreamingRemoteEngineTest, HTTPError) { |
| 233 StartMockRecognition(); | 243 StartMockRecognition(); |
| 234 ASSERT_TRUE(GetUpstreamFetcher()); | 244 ASSERT_TRUE(GetUpstreamFetcher()); |
| 235 ASSERT_EQ(0U, UpstreamChunksUploadedFromLastCall()); | 245 ASSERT_EQ(0U, UpstreamChunksUploadedFromLastCall()); |
| 236 | 246 |
| 237 InjectDummyAudioChunk(); | 247 InjectDummyAudioChunk(); |
| 238 ASSERT_EQ(1U, UpstreamChunksUploadedFromLastCall()); | 248 ASSERT_EQ(1U, UpstreamChunksUploadedFromLastCall()); |
| 239 | 249 |
| (...skipping 40 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 280 proto::SpeechRecognitionEvent proto_event; | 290 proto::SpeechRecognitionEvent proto_event; |
| 281 proto_event.set_status(proto::SpeechRecognitionEvent::STATUS_SUCCESS); | 291 proto_event.set_status(proto::SpeechRecognitionEvent::STATUS_SUCCESS); |
| 282 proto::SpeechRecognitionResult* proto_result = proto_event.add_result(); | 292 proto::SpeechRecognitionResult* proto_result = proto_event.add_result(); |
| 283 proto_result->set_stability(0.5); | 293 proto_result->set_stability(0.5); |
| 284 proto::SpeechRecognitionAlternative *proto_alternative = | 294 proto::SpeechRecognitionAlternative *proto_alternative = |
| 285 proto_result->add_alternative(); | 295 proto_result->add_alternative(); |
| 286 proto_alternative->set_transcript("foo"); | 296 proto_alternative->set_transcript("foo"); |
| 287 ProvideMockProtoResultDownstream(proto_event); | 297 ProvideMockProtoResultDownstream(proto_event); |
| 288 | 298 |
| 289 // Set up expectations. | 299 // Set up expectations. |
| 290 SpeechRecognitionResult expected; | 300 SpeechRecognitionResults results; |
| 291 expected.is_provisional = true; | 301 results.push_back(SpeechRecognitionResult()); |
| 292 expected.hypotheses.push_back( | 302 SpeechRecognitionResult& result = results.back(); |
| 303 result.is_provisional = true; |
| 304 result.hypotheses.push_back( |
| 293 SpeechRecognitionHypothesis(UTF8ToUTF16("foo"), 0.5)); | 305 SpeechRecognitionHypothesis(UTF8ToUTF16("foo"), 0.5)); |
| 294 | 306 |
| 295 // Check that the protobuf generated the expected result. | 307 // Check that the protobuf generated the expected result. |
| 296 ExpectResultReceived(expected); | 308 ExpectResultsReceived(results); |
| 297 | 309 |
| 298 // Since it was a provisional result, recognition is still pending. | 310 // Since it was a provisional result, recognition is still pending. |
| 299 ASSERT_TRUE(engine_under_test_->IsRecognitionPending()); | 311 ASSERT_TRUE(engine_under_test_->IsRecognitionPending()); |
| 300 | 312 |
| 301 // Shut down. | 313 // Shut down. |
| 302 CloseMockDownstream(DOWNSTREAM_ERROR_NONE); | 314 CloseMockDownstream(DOWNSTREAM_ERROR_NONE); |
| 303 ASSERT_FALSE(engine_under_test_->IsRecognitionPending()); | 315 ASSERT_FALSE(engine_under_test_->IsRecognitionPending()); |
| 304 EndMockRecognition(); | 316 EndMockRecognition(); |
| 305 | 317 |
| 306 // Since there was no final result, we get an empty "no match" result. | 318 // Since there was no final result, we get an empty "no match" result. |
| 307 SpeechRecognitionResult empty_result; | 319 SpeechRecognitionResults empty_results; |
| 308 ExpectResultReceived(empty_result); | 320 ExpectResultsReceived(empty_results); |
| 309 ASSERT_EQ(SPEECH_RECOGNITION_ERROR_NONE, error_); | 321 ASSERT_EQ(SPEECH_RECOGNITION_ERROR_NONE, error_); |
| 310 ASSERT_EQ(0U, results_.size()); | 322 ASSERT_EQ(0U, results_.size()); |
| 311 } | 323 } |
| 312 | 324 |
| 313 void GoogleStreamingRemoteEngineTest::SetUp() { | 325 void GoogleStreamingRemoteEngineTest::SetUp() { |
| 314 engine_under_test_.reset( | 326 engine_under_test_.reset( |
| 315 new GoogleStreamingRemoteEngine(NULL /*URLRequestContextGetter*/)); | 327 new GoogleStreamingRemoteEngine(NULL /*URLRequestContextGetter*/)); |
| 316 engine_under_test_->set_delegate(this); | 328 engine_under_test_->set_delegate(this); |
| 317 } | 329 } |
| 318 | 330 |
| (...skipping 110 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 429 | 441 |
| 430 if (error == DOWNSTREAM_ERROR_WEBSERVICE_NO_MATCH) { | 442 if (error == DOWNSTREAM_ERROR_WEBSERVICE_NO_MATCH) { |
| 431 // Send empty response. | 443 // Send empty response. |
| 432 proto::SpeechRecognitionEvent response; | 444 proto::SpeechRecognitionEvent response; |
| 433 response_buffer_.append(SerializeProtobufResponse(response)); | 445 response_buffer_.append(SerializeProtobufResponse(response)); |
| 434 } | 446 } |
| 435 downstream_fetcher->SetResponseString(response_buffer_); | 447 downstream_fetcher->SetResponseString(response_buffer_); |
| 436 downstream_fetcher->delegate()->OnURLFetchComplete(downstream_fetcher); | 448 downstream_fetcher->delegate()->OnURLFetchComplete(downstream_fetcher); |
| 437 } | 449 } |
| 438 | 450 |
| 439 void GoogleStreamingRemoteEngineTest::ExpectResultReceived( | 451 void GoogleStreamingRemoteEngineTest::ExpectResultsReceived( |
| 440 const SpeechRecognitionResult& result) { | 452 const SpeechRecognitionResults& results) { |
| 441 ASSERT_GE(1U, results_.size()); | 453 ASSERT_LE(1U, results_.size()); |
| 442 ASSERT_TRUE(ResultsAreEqual(result, results_.front())); | 454 ASSERT_TRUE(ResultsAreEqual(results, results_.front())); |
| 443 results_.pop(); | 455 results_.pop(); |
| 444 } | 456 } |
| 445 | 457 |
| 446 bool GoogleStreamingRemoteEngineTest::ResultsAreEqual( | 458 bool GoogleStreamingRemoteEngineTest::ResultsAreEqual( |
| 447 const SpeechRecognitionResult& a, const SpeechRecognitionResult& b) { | 459 const SpeechRecognitionResults& a, const SpeechRecognitionResults& b) { |
| 448 if (a.is_provisional != b.is_provisional || | 460 if (a.size() != b.size()) |
| 449 a.hypotheses.size() != b.hypotheses.size()) { | |
| 450 return false; | 461 return false; |
| 451 } | 462 |
| 452 for (size_t i = 0; i < a.hypotheses.size(); ++i) { | 463 SpeechRecognitionResults::const_iterator it_a = a.begin(); |
| 453 const SpeechRecognitionHypothesis& hyp_a = a.hypotheses[i]; | 464 SpeechRecognitionResults::const_iterator it_b = b.begin(); |
| 454 const SpeechRecognitionHypothesis& hyp_b = b.hypotheses[i]; | 465 for (; it_a != a.end() && it_b != b.end(); ++it_a, ++it_b) { |
| 455 if (hyp_a.utterance != hyp_b.utterance || | 466 if (it_a->is_provisional != it_b->is_provisional || |
| 456 hyp_a.confidence != hyp_b.confidence) { | 467 it_a->hypotheses.size() != it_b->hypotheses.size()) { |
| 457 return false; | 468 return false; |
| 458 } | 469 } |
| 470 for (size_t i = 0; i < it_a->hypotheses.size(); ++i) { |
| 471 const SpeechRecognitionHypothesis& hyp_a = it_a->hypotheses[i]; |
| 472 const SpeechRecognitionHypothesis& hyp_b = it_b->hypotheses[i]; |
| 473 if (hyp_a.utterance != hyp_b.utterance || |
| 474 hyp_a.confidence != hyp_b.confidence) { |
| 475 return false; |
| 476 } |
| 477 } |
| 459 } | 478 } |
| 479 |
| 460 return true; | 480 return true; |
| 461 } | 481 } |
| 462 | 482 |
| 463 std::string GoogleStreamingRemoteEngineTest::SerializeProtobufResponse( | 483 std::string GoogleStreamingRemoteEngineTest::SerializeProtobufResponse( |
| 464 const proto::SpeechRecognitionEvent& msg) { | 484 const proto::SpeechRecognitionEvent& msg) { |
| 465 std::string msg_string; | 485 std::string msg_string; |
| 466 msg.SerializeToString(&msg_string); | 486 msg.SerializeToString(&msg_string); |
| 467 | 487 |
| 468 // Prepend 4 byte prefix length indication to the protobuf message as | 488 // Prepend 4 byte prefix length indication to the protobuf message as |
| 469 // envisaged by the google streaming recognition webservice protocol. | 489 // envisaged by the google streaming recognition webservice protocol. |
| 470 msg_string.insert(0, ToBigEndian32(msg_string.size())); | 490 msg_string.insert(0, ToBigEndian32(msg_string.size())); |
| 471 return msg_string; | 491 return msg_string; |
| 472 } | 492 } |
| 473 | 493 |
| 474 std::string GoogleStreamingRemoteEngineTest::ToBigEndian32(uint32 value) { | 494 std::string GoogleStreamingRemoteEngineTest::ToBigEndian32(uint32 value) { |
| 475 char raw_data[4]; | 495 char raw_data[4]; |
| 476 raw_data[0] = static_cast<uint8>((value >> 24) & 0xFF); | 496 raw_data[0] = static_cast<uint8>((value >> 24) & 0xFF); |
| 477 raw_data[1] = static_cast<uint8>((value >> 16) & 0xFF); | 497 raw_data[1] = static_cast<uint8>((value >> 16) & 0xFF); |
| 478 raw_data[2] = static_cast<uint8>((value >> 8) & 0xFF); | 498 raw_data[2] = static_cast<uint8>((value >> 8) & 0xFF); |
| 479 raw_data[3] = static_cast<uint8>(value & 0xFF); | 499 raw_data[3] = static_cast<uint8>(value & 0xFF); |
| 480 return std::string(raw_data, sizeof(raw_data)); | 500 return std::string(raw_data, sizeof(raw_data)); |
| 481 } | 501 } |
| 482 | 502 |
| 483 } // namespace content | 503 } // namespace content |
| OLD | NEW |