Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(76)

Side by Side Diff: content/browser/speech/speech_recognizer_impl.cc

Issue 9663066: Refactoring of chrome speech recognition architecture (CL1.3) (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Fixed according to Satish's latest comments. Created 8 years, 9 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
OLDNEW
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "content/browser/speech/speech_recognizer_impl.h" 5 #include "content/browser/speech/speech_recognizer_impl.h"
6 6
7 #include "base/bind.h" 7 #include "base/bind.h"
8 #include "base/time.h" 8 #include "base/time.h"
9 #include "content/browser/browser_main_loop.h" 9 #include "content/browser/browser_main_loop.h"
10 #include "content/browser/speech/audio_buffer.h" 10 #include "content/browser/speech/audio_buffer.h"
11 #include "content/browser/speech/google_one_shot_remote_engine.h"
12 #include "content/public/browser/browser_thread.h"
11 #include "content/public/browser/speech_recognition_event_listener.h" 13 #include "content/public/browser/speech_recognition_event_listener.h"
12 #include "content/public/browser/browser_thread.h" 14 #include "content/public/browser/speech_recognizer.h"
13 #include "content/public/common/speech_recognition_result.h" 15 #include "content/public/common/speech_recognition_result.h"
14 #include "net/url_request/url_request_context_getter.h" 16 #include "net/url_request/url_request_context_getter.h"
15 17
16 using content::BrowserMainLoop; 18 using content::BrowserMainLoop;
17 using content::BrowserThread; 19 using content::BrowserThread;
20 using content::SpeechRecognitionError;
18 using content::SpeechRecognitionEventListener; 21 using content::SpeechRecognitionEventListener;
22 using content::SpeechRecognitionResult;
19 using content::SpeechRecognizer; 23 using content::SpeechRecognizer;
20 using media::AudioInputController; 24 using media::AudioInputController;
21 using std::string;
22 25
23 namespace { 26 namespace {
24 27
25 // The following constants are related to the volume level indicator shown in 28 // The following constants are related to the volume level indicator shown in
26 // the UI for recorded audio. 29 // the UI for recorded audio.
27 // Multiplier used when new volume is greater than previous level. 30 // Multiplier used when new volume is greater than previous level.
28 const float kUpSmoothingFactor = 1.0f; 31 const float kUpSmoothingFactor = 1.0f;
29 // Multiplier used when new volume is lesser than previous level. 32 // Multiplier used when new volume is lesser than previous level.
30 const float kDownSmoothingFactor = 0.7f; 33 const float kDownSmoothingFactor = 0.7f;
31 // RMS dB value of a maximum (unclipped) sine wave for int16 samples. 34 // RMS dB value of a maximum (unclipped) sine wave for int16 samples.
(...skipping 25 matching lines...) Expand all
57 60
58 SpeechRecognizer* SpeechRecognizer::Create( 61 SpeechRecognizer* SpeechRecognizer::Create(
59 SpeechRecognitionEventListener* listener, 62 SpeechRecognitionEventListener* listener,
60 int caller_id, 63 int caller_id,
61 const std::string& language, 64 const std::string& language,
62 const std::string& grammar, 65 const std::string& grammar,
63 net::URLRequestContextGetter* context_getter, 66 net::URLRequestContextGetter* context_getter,
64 bool filter_profanities, 67 bool filter_profanities,
65 const std::string& hardware_info, 68 const std::string& hardware_info,
66 const std::string& origin_url) { 69 const std::string& origin_url) {
67 return new speech::SpeechRecognizerImpl( 70 return new speech::SpeechRecognizerImpl(listener,
68 listener, caller_id, language, grammar, context_getter, 71 caller_id,
69 filter_profanities, hardware_info, origin_url); 72 language,
73 grammar,
74 context_getter,
75 filter_profanities,
76 hardware_info,
77 origin_url);
70 } 78 }
71 79
72 namespace speech { 80 namespace speech {
73 81
74 const int SpeechRecognizerImpl::kAudioSampleRate = 16000; 82 const int SpeechRecognizerImpl::kAudioSampleRate = 16000;
75 const int SpeechRecognizerImpl::kAudioPacketIntervalMs = 100;
76 const ChannelLayout SpeechRecognizerImpl::kChannelLayout = CHANNEL_LAYOUT_MONO; 83 const ChannelLayout SpeechRecognizerImpl::kChannelLayout = CHANNEL_LAYOUT_MONO;
77 const int SpeechRecognizerImpl::kNumBitsPerAudioSample = 16; 84 const int SpeechRecognizerImpl::kNumBitsPerAudioSample = 16;
78 const int SpeechRecognizerImpl::kNoSpeechTimeoutSec = 8; 85 const int SpeechRecognizerImpl::kNoSpeechTimeoutMs = 8000;
79 const int SpeechRecognizerImpl::kEndpointerEstimationTimeMs = 300; 86 const int SpeechRecognizerImpl::kEndpointerEstimationTimeMs = 300;
80 87
81 SpeechRecognizerImpl::SpeechRecognizerImpl( 88 SpeechRecognizerImpl::SpeechRecognizerImpl(
82 SpeechRecognitionEventListener* listener, 89 SpeechRecognitionEventListener* listener,
83 int caller_id, 90 int caller_id,
84 const std::string& language, 91 const std::string& language,
85 const std::string& grammar, 92 const std::string& grammar,
86 net::URLRequestContextGetter* context_getter, 93 net::URLRequestContextGetter* context_getter,
87 bool filter_profanities, 94 bool filter_profanities,
88 const std::string& hardware_info, 95 const std::string& hardware_info,
89 const std::string& origin_url) 96 const std::string& origin_url)
90 : listener_(listener), 97 : listener_(listener),
98 testing_audio_manager_(NULL),
99 endpointer_(kAudioSampleRate),
100 context_getter_(context_getter),
91 caller_id_(caller_id), 101 caller_id_(caller_id),
92 language_(language), 102 language_(language),
93 grammar_(grammar), 103 grammar_(grammar),
94 filter_profanities_(filter_profanities), 104 filter_profanities_(filter_profanities),
95 hardware_info_(hardware_info), 105 hardware_info_(hardware_info),
96 origin_url_(origin_url), 106 origin_url_(origin_url) {
97 context_getter_(context_getter), 107 DCHECK(listener_ != NULL);
98 codec_(AudioEncoder::CODEC_FLAC),
99 encoder_(NULL),
100 endpointer_(kAudioSampleRate),
101 num_samples_recorded_(0),
102 audio_level_(0.0f),
103 audio_manager_(NULL) {
104 endpointer_.set_speech_input_complete_silence_length( 108 endpointer_.set_speech_input_complete_silence_length(
105 base::Time::kMicrosecondsPerSecond / 2); 109 base::Time::kMicrosecondsPerSecond / 2);
106 endpointer_.set_long_speech_input_complete_silence_length( 110 endpointer_.set_long_speech_input_complete_silence_length(
107 base::Time::kMicrosecondsPerSecond); 111 base::Time::kMicrosecondsPerSecond);
108 endpointer_.set_long_speech_length(3 * base::Time::kMicrosecondsPerSecond); 112 endpointer_.set_long_speech_length(3 * base::Time::kMicrosecondsPerSecond);
109 endpointer_.StartSession(); 113 endpointer_.StartSession();
110 } 114 }
111 115
112 SpeechRecognizerImpl::~SpeechRecognizerImpl() { 116 SpeechRecognizerImpl::~SpeechRecognizerImpl() {
113 // Recording should have stopped earlier due to the endpointer or 117 // Recording should have stopped earlier due to the endpointer or
114 // |StopRecording| being called. 118 // |StopRecording| being called.
115 DCHECK(!audio_controller_.get()); 119 DCHECK(!audio_controller_.get());
116 DCHECK(!request_.get() || !request_->HasPendingRequest()); 120 DCHECK(!recognition_engine_.get() ||
117 DCHECK(!encoder_.get()); 121 !recognition_engine_->IsRecognitionPending());
118 endpointer_.EndSession(); 122 endpointer_.EndSession();
119 } 123 }
120 124
121 bool SpeechRecognizerImpl::StartRecognition() { 125 void SpeechRecognizerImpl::StartRecognition() {
122 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); 126 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO));
123 DCHECK(!audio_controller_.get()); 127 DCHECK(!audio_controller_.get());
124 DCHECK(!request_.get() || !request_->HasPendingRequest()); 128 DCHECK(!recognition_engine_.get() ||
125 DCHECK(!encoder_.get()); 129 !recognition_engine_->IsRecognitionPending());
126 130
127 // The endpointer needs to estimate the environment/background noise before 131 // The endpointer needs to estimate the environment/background noise before
128 // starting to treat the audio as user input. In |HandleOnData| we wait until 132 // starting to treat the audio as user input. In |HandleOnData| we wait until
129 // such time has passed before switching to user input mode. 133 // such time has passed before switching to user input mode.
130 endpointer_.SetEnvironmentEstimationMode(); 134 endpointer_.SetEnvironmentEstimationMode();
131 135
132 encoder_.reset(AudioEncoder::Create(codec_, kAudioSampleRate, 136 AudioManager* audio_manager = (testing_audio_manager_ != NULL) ?
133 kNumBitsPerAudioSample)); 137 testing_audio_manager_ :
134 int samples_per_packet = (kAudioSampleRate * kAudioPacketIntervalMs) / 1000; 138 BrowserMainLoop::GetAudioManager();
139 const int samples_per_packet = kAudioSampleRate *
140 GoogleOneShotRemoteEngine::kAudioPacketIntervalMs / 1000;
135 AudioParameters params(AudioParameters::AUDIO_PCM_LINEAR, kChannelLayout, 141 AudioParameters params(AudioParameters::AUDIO_PCM_LINEAR, kChannelLayout,
136 kAudioSampleRate, kNumBitsPerAudioSample, 142 kAudioSampleRate, kNumBitsPerAudioSample,
137 samples_per_packet); 143 samples_per_packet);
138 audio_controller_ = AudioInputController::Create( 144 audio_controller_ = AudioInputController::Create(audio_manager, this, params);
139 audio_manager_ ? audio_manager_ : BrowserMainLoop::GetAudioManager(),
140 this, params);
141 DCHECK(audio_controller_.get()); 145 DCHECK(audio_controller_.get());
142 VLOG(1) << "SpeechRecognizer starting record."; 146 VLOG(1) << "SpeechRecognizer starting record.";
143 num_samples_recorded_ = 0; 147 num_samples_recorded_ = 0;
144 audio_controller_->Record(); 148 audio_controller_->Record();
145
146 return true;
147 } 149 }
148 150
149 void SpeechRecognizerImpl::AbortRecognition() { 151 void SpeechRecognizerImpl::AbortRecognition() {
150 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); 152 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO));
151 DCHECK(audio_controller_.get() || request_.get()); 153 DCHECK(audio_controller_.get() || recognition_engine_.get());
152 154
153 // Stop recording if required. 155 // Stop recording if required.
154 if (audio_controller_.get()) { 156 if (audio_controller_.get()) {
155 CloseAudioControllerSynchronously(); 157 CloseAudioControllerSynchronously();
156 } 158 }
157 159
158 VLOG(1) << "SpeechRecognizer canceling recognition."; 160 VLOG(1) << "SpeechRecognizer canceling recognition.";
159 encoder_.reset(); 161 recognition_engine_.reset();
160 request_.reset();
161 } 162 }
162 163
163 void SpeechRecognizerImpl::StopAudioCapture() { 164 void SpeechRecognizerImpl::StopAudioCapture() {
164 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); 165 DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO));
165 166
166 // If audio recording has already stopped and we are in recognition phase, 167 // If audio recording has already stopped and we are in recognition phase,
167 // silently ignore any more calls to stop recording. 168 // silently ignore any more calls to stop recording.
168 if (!audio_controller_.get()) 169 if (!audio_controller_.get())
169 return; 170 return;
170 171
171 CloseAudioControllerSynchronously(); 172 CloseAudioControllerSynchronously();
172
173 listener_->OnSoundEnd(caller_id_); 173 listener_->OnSoundEnd(caller_id_);
174 listener_->OnAudioEnd(caller_id_); 174 listener_->OnAudioEnd(caller_id_);
175 175
176 // UploadAudioChunk requires a non-empty final buffer. So we encode a packet
177 // of silence in case encoder had no data already.
178 std::vector<short> samples((kAudioSampleRate * kAudioPacketIntervalMs) /
179 1000);
180 AudioChunk dummy_chunk(reinterpret_cast<uint8*>(&samples[0]),
181 samples.size() * sizeof(short),
182 encoder_->bits_per_sample() / 8);
183 encoder_->Encode(dummy_chunk);
184 encoder_->Flush();
185 scoped_ptr<AudioChunk> encoded_data(encoder_->GetEncodedDataAndClear());
186 DCHECK(!encoded_data->IsEmpty());
187 encoder_.reset();
188
189 // If we haven't got any audio yet end the recognition sequence here. 176 // If we haven't got any audio yet end the recognition sequence here.
190 if (request_ == NULL) { 177 if (recognition_engine_ == NULL) {
191 // Guard against the listener freeing us until we finish our job. 178 // Guard against the listener freeing us until we finish our job.
192 scoped_refptr<SpeechRecognizerImpl> me(this); 179 scoped_refptr<SpeechRecognizerImpl> me(this);
193 listener_->OnRecognitionEnd(caller_id_); 180 listener_->OnRecognitionEnd(caller_id_);
194 } else { 181 } else {
195 request_->UploadAudioChunk(*encoded_data, true /* is_last_chunk */); 182 recognition_engine_->AudioChunksEnded();
196 } 183 }
197 } 184 }
198 185
199 // Invoked in the audio thread. 186 // Invoked in the audio thread.
200 void SpeechRecognizerImpl::OnError(AudioInputController* controller, 187 void SpeechRecognizerImpl::OnError(AudioInputController* controller,
201 int error_code) { 188 int error_code) {
202 BrowserThread::PostTask(BrowserThread::IO, FROM_HERE, 189 BrowserThread::PostTask(BrowserThread::IO, FROM_HERE,
203 base::Bind(&SpeechRecognizerImpl::HandleOnError, 190 base::Bind(&SpeechRecognizerImpl::HandleOnError,
204 this, error_code)); 191 this, error_code));
205 } 192 }
(...skipping 24 matching lines...) Expand all
230 void SpeechRecognizerImpl::HandleOnData(AudioChunk* raw_audio) { 217 void SpeechRecognizerImpl::HandleOnData(AudioChunk* raw_audio) {
231 scoped_ptr<AudioChunk> free_raw_audio_on_return(raw_audio); 218 scoped_ptr<AudioChunk> free_raw_audio_on_return(raw_audio);
232 // Check if we are still recording and if not discard this buffer, as 219 // Check if we are still recording and if not discard this buffer, as
233 // recording might have been stopped after this buffer was posted to the queue 220 // recording might have been stopped after this buffer was posted to the queue
234 // by |OnData|. 221 // by |OnData|.
235 if (!audio_controller_.get()) 222 if (!audio_controller_.get())
236 return; 223 return;
237 224
238 bool speech_was_heard_before_packet = endpointer_.DidStartReceivingSpeech(); 225 bool speech_was_heard_before_packet = endpointer_.DidStartReceivingSpeech();
239 226
240 encoder_->Encode(*raw_audio);
241 float rms; 227 float rms;
242 endpointer_.ProcessAudio(*raw_audio, &rms); 228 endpointer_.ProcessAudio(*raw_audio, &rms);
243 bool did_clip = DetectClipping(*raw_audio); 229 bool did_clip = DetectClipping(*raw_audio);
244 num_samples_recorded_ += raw_audio->NumSamples(); 230 num_samples_recorded_ += raw_audio->NumSamples();
245 231
246 if (request_ == NULL) { 232 if (recognition_engine_ == NULL) {
247 // This was the first audio packet recorded, so start a request to the 233 // This was the first audio packet recorded, so start a request to the
248 // server to send the data and inform the listener. 234 // server to send the data and inform the listener.
249 listener_->OnAudioStart(caller_id_); 235 listener_->OnAudioStart(caller_id_);
250 request_.reset(new SpeechRecognitionRequest(context_getter_.get(), this)); 236 GoogleOneShotRemoteEngineConfig google_sr_config;
251 request_->Start(language_, grammar_, filter_profanities_, 237 google_sr_config.language = language_;
252 hardware_info_, origin_url_, encoder_->mime_type()); 238 google_sr_config.grammar = grammar_;
239 google_sr_config.audio_sample_rate = kAudioSampleRate;
240 google_sr_config.audio_num_bits_per_sample = kNumBitsPerAudioSample;
241 google_sr_config.filter_profanities = filter_profanities_;
242 google_sr_config.hardware_info = hardware_info_;
243 google_sr_config.origin_url = origin_url_;
244 GoogleOneShotRemoteEngine* google_sr_engine =
245 new GoogleOneShotRemoteEngine(context_getter_.get());
246 google_sr_engine->SetConfig(google_sr_config);
247 recognition_engine_.reset(google_sr_engine);
248 recognition_engine_->set_delegate(this);
249 recognition_engine_->StartRecognition();
253 } 250 }
254 251
255 scoped_ptr<AudioChunk> encoded_data(encoder_->GetEncodedDataAndClear()); 252 recognition_engine_->TakeAudioChunk(*raw_audio);
256 DCHECK(!encoded_data->IsEmpty());
257 request_->UploadAudioChunk(*encoded_data, false /* is_last_chunk */);
258 253
259 if (endpointer_.IsEstimatingEnvironment()) { 254 if (endpointer_.IsEstimatingEnvironment()) {
260 // Check if we have gathered enough audio for the endpointer to do 255 // Check if we have gathered enough audio for the endpointer to do
261 // environment estimation and should move on to detect speech/end of speech. 256 // environment estimation and should move on to detect speech/end of speech.
262 if (num_samples_recorded_ >= (kEndpointerEstimationTimeMs * 257 if (num_samples_recorded_ >= (kEndpointerEstimationTimeMs *
263 kAudioSampleRate) / 1000) { 258 kAudioSampleRate) / 1000) {
264 endpointer_.SetUserInputMode(); 259 endpointer_.SetUserInputMode();
265 listener_->OnEnvironmentEstimationComplete(caller_id_); 260 listener_->OnEnvironmentEstimationComplete(caller_id_);
266 } 261 }
267 return; // No more processing since we are still estimating environment. 262 return; // No more processing since we are still estimating environment.
268 } 263 }
269 264
270 // Check if we have waited too long without hearing any speech. 265 // Check if we have waited too long without hearing any speech.
271 bool speech_was_heard_after_packet = endpointer_.DidStartReceivingSpeech(); 266 bool speech_was_heard_after_packet = endpointer_.DidStartReceivingSpeech();
272 if (!speech_was_heard_after_packet && 267 if (!speech_was_heard_after_packet &&
273 num_samples_recorded_ >= kNoSpeechTimeoutSec * kAudioSampleRate) { 268 num_samples_recorded_ >= (kNoSpeechTimeoutMs / 1000) * kAudioSampleRate) {
274 InformErrorAndAbortRecognition( 269 InformErrorAndAbortRecognition(
275 content::SPEECH_RECOGNITION_ERROR_NO_SPEECH); 270 content::SPEECH_RECOGNITION_ERROR_NO_SPEECH);
276 return; 271 return;
277 } 272 }
278 273
279 if (!speech_was_heard_before_packet && speech_was_heard_after_packet) 274 if (!speech_was_heard_before_packet && speech_was_heard_after_packet)
280 listener_->OnSoundStart(caller_id_); 275 listener_->OnSoundStart(caller_id_);
281 276
282 // Calculate the input volume to display in the UI, smoothing towards the 277 // Calculate the input volume to display in the UI, smoothing towards the
283 // new level. 278 // new level.
(...skipping 11 matching lines...) Expand all
295 noise_level = std::min(std::max(0.0f, noise_level), 290 noise_level = std::min(std::max(0.0f, noise_level),
296 kAudioMeterRangeMaxUnclipped); 291 kAudioMeterRangeMaxUnclipped);
297 292
298 listener_->OnAudioLevelsChange(caller_id_, did_clip ? 1.0f : audio_level_, 293 listener_->OnAudioLevelsChange(caller_id_, did_clip ? 1.0f : audio_level_,
299 noise_level); 294 noise_level);
300 295
301 if (endpointer_.speech_input_complete()) 296 if (endpointer_.speech_input_complete())
302 StopAudioCapture(); 297 StopAudioCapture();
303 } 298 }
304 299
305 void SpeechRecognizerImpl::SetRecognitionResult( 300 void SpeechRecognizerImpl::OnSpeechRecognitionEngineResult(
306 const content::SpeechRecognitionResult& result) { 301 const content::SpeechRecognitionResult& result) {
307 if (result.error != content::SPEECH_RECOGNITION_ERROR_NONE) { 302 if (result.error != content::SPEECH_RECOGNITION_ERROR_NONE) {
308 InformErrorAndAbortRecognition(result.error); 303 InformErrorAndAbortRecognition(result.error);
309 return; 304 return;
310 } 305 }
311 306
312 // Guard against the listener freeing us until we finish our job. 307 // Guard against the listener freeing us until we finish our job.
313 scoped_refptr<SpeechRecognizerImpl> me(this); 308 scoped_refptr<SpeechRecognizerImpl> me(this);
314 listener_->OnRecognitionResult(caller_id_, result); 309 listener_->OnRecognitionResult(caller_id_, result);
315 listener_->OnRecognitionEnd(caller_id_); 310 listener_->OnRecognitionEnd(caller_id_);
316 } 311 }
317 312
313 void SpeechRecognizerImpl::OnSpeechRecognitionEngineError(
314 const content::SpeechRecognitionError& error) {
315 InformErrorAndAbortRecognition(error.code);
316 }
317
318 void SpeechRecognizerImpl::InformErrorAndAbortRecognition( 318 void SpeechRecognizerImpl::InformErrorAndAbortRecognition(
319 content::SpeechRecognitionErrorCode error) { 319 content::SpeechRecognitionErrorCode error) {
320 DCHECK_NE(error, content::SPEECH_RECOGNITION_ERROR_NONE); 320 DCHECK_NE(error, content::SPEECH_RECOGNITION_ERROR_NONE);
321 AbortRecognition(); 321 AbortRecognition();
322 322
323 // Guard against the listener freeing us until we finish our job. 323 // Guard against the listener freeing us until we finish our job.
324 scoped_refptr<SpeechRecognizerImpl> me(this); 324 scoped_refptr<SpeechRecognizerImpl> me(this);
325 listener_->OnRecognitionError(caller_id_, error); 325 listener_->OnRecognitionError(caller_id_, error);
326 } 326 }
327 327
328 void SpeechRecognizerImpl::CloseAudioControllerSynchronously() { 328 void SpeechRecognizerImpl::CloseAudioControllerSynchronously() {
329 VLOG(1) << "SpeechRecognizer stopping record."; 329 VLOG(1) << "SpeechRecognizer stopping record.";
330 330
331 // TODO(satish): investigate the possibility to utilize the closure 331 // TODO(satish): investigate the possibility to utilize the closure
332 // and switch to async. version of this method. Compare with how 332 // and switch to async. version of this method. Compare with how
333 // it's done in e.g. the AudioRendererHost. 333 // it's done in e.g. the AudioRendererHost.
334 base::WaitableEvent closed_event(true, false); 334 base::WaitableEvent closed_event(true, false);
335 audio_controller_->Close(base::Bind(&base::WaitableEvent::Signal, 335 audio_controller_->Close(base::Bind(&base::WaitableEvent::Signal,
336 base::Unretained(&closed_event))); 336 base::Unretained(&closed_event)));
337 closed_event.Wait(); 337 closed_event.Wait();
338 audio_controller_ = NULL; // Releases the ref ptr. 338 audio_controller_ = NULL; // Releases the ref ptr.
339 } 339 }
340 340
341 void SpeechRecognizerImpl::SetAudioManagerForTesting(
342 AudioManager* audio_manager) {
343 audio_manager_ = audio_manager;
344 }
345
346 bool SpeechRecognizerImpl::IsActive() const { 341 bool SpeechRecognizerImpl::IsActive() const {
347 return (request_.get() != NULL); 342 return (recognition_engine_.get() != NULL);
348 } 343 }
349 344
350 bool SpeechRecognizerImpl::IsCapturingAudio() const { 345 bool SpeechRecognizerImpl::IsCapturingAudio() const {
351 return (audio_controller_.get() != NULL); 346 return (audio_controller_.get() != NULL);
352 } 347 }
353 348
349 const SpeechRecognitionEngine&
350 SpeechRecognizerImpl::recognition_engine() const {
351 return *(recognition_engine_.get());
352 }
353
354 void SpeechRecognizerImpl::SetAudioManagerForTesting(
355 AudioManager* audio_manager) {
356 testing_audio_manager_ = audio_manager;
357 }
358
359
354 } // namespace speech 360 } // namespace speech
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698