Index: content/browser/speech/speech_recognizer_impl.cc
diff --git a/content/browser/speech/speech_recognizer_impl.cc b/content/browser/speech/speech_recognizer_impl.cc
index 65ec1fbb74ca4ac6e69911a282e8a527a75cada9..1d1c635aee965cf7e84c0f0bcf7e9c24cd3ad11a 100644
--- a/content/browser/speech/speech_recognizer_impl.cc
+++ b/content/browser/speech/speech_recognizer_impl.cc
@@ -81,7 +81,7 @@ SpeechRecognizerImpl::SpeechRecognizerImpl(
       is_dispatching_event_(false),
       is_single_shot_(is_single_shot),
       state_(STATE_IDLE) {
-  DCHECK(listener_ != NULL);
+  DCHECK(this->listener() != NULL);
bulach (2013/06/11 07:24:48):
    nit: move the DCHECK to the base class, and there
janx (2013/06/12 14:47:14):
    Moved DCHECK to base class.

   DCHECK(recognition_engine_ != NULL);
   if (is_single_shot) {
     // In single shot recognition, the session is automatically ended after:
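
For reference on the review thread above: the base-class change that janx refers to is part of a later patch set and is not shown in this diff. Below is only a rough sketch of what moving the DCHECK (together with the listener()/session_id() accessors used throughout the hunks that follow) into the base class could look like; the constructor signature, member names, and include paths are assumptions for illustration, not taken from this patch.

    // Sketch only -- not part of this patch.
    #include "base/logging.h"
    #include "content/public/browser/speech_recognition_event_listener.h"

    class SpeechRecognizer {
     public:
      SpeechRecognizer(SpeechRecognitionEventListener* listener, int session_id)
          : listener_(listener),
            session_id_(session_id) {
        // Checked once here, so subclasses such as SpeechRecognizerImpl no
        // longer need their own DCHECK on the listener.
        DCHECK(listener_ != NULL);
      }

     protected:
      // Accessors matching the listener()/session_id() calls in the hunks below.
      SpeechRecognitionEventListener* listener() const { return listener_; }
      int session_id() const { return session_id_; }

     private:
      SpeechRecognitionEventListener* const listener_;
      const int session_id_;
    };

With the check living in the base constructor, the subclass only has to swap its direct listener_/session_id_ members for the protected accessors, which is what all the remaining hunks in this file do.
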
@@ -396,7 +396,7 @@ SpeechRecognizerImpl::StartRecording(const FSMEventArgs&) {
   DVLOG(1) << "SpeechRecognizerImpl starting audio capture.";
   num_samples_recorded_ = 0;
   audio_level_ = 0;
-  listener_->OnRecognitionStart(session_id_);
+  listener()->OnRecognitionStart(session_id());
   if (!audio_manager->HasAudioInputDevices()) {
     return Abort(SpeechRecognitionError(SPEECH_RECOGNITION_ERROR_AUDIO,
@@ -429,7 +429,7 @@ SpeechRecognizerImpl::StartRecognitionEngine(const FSMEventArgs& event_args) {
   // started and the delegate notified about the event.
   DCHECK(recognition_engine_.get() != NULL);
   recognition_engine_->StartRecognition();
-  listener_->OnAudioStart(session_id_);
+  listener()->OnAudioStart(session_id());
   // This is a little hack, since TakeAudioChunk() is already called by
   // ProcessAudioPipeline(). It is the best tradeoff, unless we allow dropping
@@ -443,7 +443,7 @@ SpeechRecognizerImpl::WaitEnvironmentEstimationCompletion(const FSMEventArgs&) {
   DCHECK(endpointer_.IsEstimatingEnvironment());
   if (GetElapsedTimeMs() >= kEndpointerEstimationTimeMs) {
     endpointer_.SetUserInputMode();
-    listener_->OnEnvironmentEstimationComplete(session_id_);
+    listener()->OnEnvironmentEstimationComplete(session_id());
     return STATE_WAITING_FOR_SPEECH;
   } else {
     return STATE_ESTIMATING_ENVIRONMENT;
@@ -453,7 +453,7 @@ SpeechRecognizerImpl::WaitEnvironmentEstimationCompletion(const FSMEventArgs&) {
 SpeechRecognizerImpl::FSMState
 SpeechRecognizerImpl::DetectUserSpeechOrTimeout(const FSMEventArgs&) {
   if (endpointer_.DidStartReceivingSpeech()) {
-    listener_->OnSoundStart(session_id_);
+    listener()->OnSoundStart(session_id());
     return STATE_RECOGNIZING;
   } else if (GetElapsedTimeMs() >= kNoSpeechTimeoutMs) {
     return Abort(SpeechRecognitionError(SPEECH_RECOGNITION_ERROR_NO_SPEECH));
@@ -477,9 +477,9 @@ SpeechRecognizerImpl::StopCaptureAndWaitForResult(const FSMEventArgs&) {
   recognition_engine_->AudioChunksEnded();
   if (state_ > STATE_WAITING_FOR_SPEECH)
-    listener_->OnSoundEnd(session_id_);
+    listener()->OnSoundEnd(session_id());
-  listener_->OnAudioEnd(session_id_);
+  listener()->OnAudioEnd(session_id());
   return STATE_WAITING_FINAL_RESULT;
 }
@@ -514,15 +514,15 @@ SpeechRecognizerImpl::FSMState SpeechRecognizerImpl::Abort(
   }
   if (state_ > STATE_WAITING_FOR_SPEECH && state_ < STATE_WAITING_FINAL_RESULT)
-    listener_->OnSoundEnd(session_id_);
+    listener()->OnSoundEnd(session_id());
   if (state_ > STATE_STARTING && state_ < STATE_WAITING_FINAL_RESULT)
-    listener_->OnAudioEnd(session_id_);
+    listener()->OnAudioEnd(session_id());
   if (error.code != SPEECH_RECOGNITION_ERROR_NONE)
-    listener_->OnRecognitionError(session_id_, error);
+    listener()->OnRecognitionError(session_id(), error);
-  listener_->OnRecognitionEnd(session_id_);
+  listener()->OnRecognitionEnd(session_id());
   return STATE_ENDED;
 }
@@ -542,14 +542,14 @@ SpeechRecognizerImpl::FSMState SpeechRecognizerImpl::ProcessIntermediateResult(
   if (state_ == STATE_ESTIMATING_ENVIRONMENT) {
     DCHECK(endpointer_.IsEstimatingEnvironment());
     endpointer_.SetUserInputMode();
-    listener_->OnEnvironmentEstimationComplete(session_id_);
+    listener()->OnEnvironmentEstimationComplete(session_id());
   } else if (state_ == STATE_WAITING_FOR_SPEECH) {
-    listener_->OnSoundStart(session_id_);
+    listener()->OnSoundStart(session_id());
   } else {
     DCHECK_EQ(STATE_RECOGNIZING, state_);
   }
-  listener_->OnRecognitionResults(session_id_, event_args.engine_results);
+  listener()->OnRecognitionResults(session_id(), event_args.engine_results);
   return STATE_RECOGNIZING;
 }
@@ -570,7 +570,7 @@ SpeechRecognizerImpl::ProcessFinalResult(const FSMEventArgs& event_args) {
   }
   if (provisional_results_pending) {
-    listener_->OnRecognitionResults(session_id_, results);
+    listener()->OnRecognitionResults(session_id(), results);
     // We don't end the recognition if a provisional result is received in
     // STATE_WAITING_FINAL_RESULT. A definitive result will come next and will
     // end the recognition.
@@ -589,10 +589,10 @@ SpeechRecognizerImpl::ProcessFinalResult(const FSMEventArgs& event_args) {
     // 4. The speech recognition engine, therefore, emits an empty result to
     //    notify that the recognition is ended with no error, yet neither any
     //    further result.
-    listener_->OnRecognitionResults(session_id_, results);
+    listener()->OnRecognitionResults(session_id(), results);
   }
-  listener_->OnRecognitionEnd(session_id_);
+  listener()->OnRecognitionEnd(session_id());
   return STATE_ENDED;
 }
@@ -642,8 +642,8 @@ void SpeechRecognizerImpl::UpdateSignalAndNoiseLevels(const float& rms,
   noise_level = std::min(std::max(0.0f, noise_level),
                          kAudioMeterRangeMaxUnclipped);
-  listener_->OnAudioLevelsChange(
-      session_id_, clip_detected ? 1.0f : audio_level_, noise_level);
+  listener()->OnAudioLevelsChange(
+      session_id(), clip_detected ? 1.0f : audio_level_, noise_level);
 }
 void SpeechRecognizerImpl::SetAudioManagerForTests(