| Index: content/public/browser/speech_recognition_manager.h
|
| diff --git a/content/public/browser/speech_recognition_manager.h b/content/public/browser/speech_recognition_manager.h
|
| index 9c94999529afed9e1895fda494d5851c518d4c49..b2a2056a4f22f5435f5e8a2c3ef6eee3a2e9e10c 100644
|
| --- a/content/public/browser/speech_recognition_manager.h
|
| +++ b/content/public/browser/speech_recognition_manager.h
|
| @@ -6,28 +6,69 @@
|
| #define CONTENT_PUBLIC_BROWSER_SPEECH_RECOGNITION_MANAGER_H_
|
|
|
| #include "base/string16.h"
|
| +#include "base/callback.h"
|
| #include "content/common/content_export.h"
|
| +#include "content/public/common/speech_recognition_result.h"
|
|
|
| namespace content {
|
|
|
| -// This is the gatekeeper for speech recognition in the browser process. It
|
| -// handles requests received from various render views and makes sure only one
|
| -// of them can use speech recognition at a time. It also sends recognition
|
| -// results and status events to the render views when required.
|
| -class SpeechRecognitionManager {
|
| +class SpeechRecognitionEventListener;
|
| +struct SpeechRecognitionSessionConfig;
|
| +struct SpeechRecognitionSessionContext;
|
| +
|
| +// The SpeechRecognitionManager (SRM) is a singleton class that handles SR
|
| +// functionality within Chrome. Everyone who needs to perform SR should
|
| +// interface exclusively with the SRM, receiving events through the callback
|
| +// interface SpeechRecognitionEventListener.
|
| +// Since many different sources can use SR at different times (some overlapping
|
| +// is allowed while waiting for results), the SRM has the further responsibility
|
| +// of handling each session separately and reliably (taking into account call
|
| +// sequences that might not make sense, e.g., two subsequent AbortSession calls).
|
| +// In this sense a session, within the SRM, models the ongoing evolution of an
|
| +// SR request from the viewpoint of the end-user, abstracting all the concrete
|
| +// operations that must be carried out, that will be handled by inner classes.
|
| +class CONTENT_EXPORT SpeechRecognitionManager {
|
| public:
|
| + static const int kSessionIDInvalid;
|
| +
|
| // Returns the singleton instance.
|
| - CONTENT_EXPORT static SpeechRecognitionManager* GetInstance();
|
| + static SpeechRecognitionManager* GetInstance();
|
| +
|
| + // Creates a new recognition session.
|
| + virtual int CreateSession(const SpeechRecognitionSessionConfig& config,
|
| + SpeechRecognitionEventListener* listener) = 0;
|
| +
|
| + // Starts/restarts recognition for an existing session.
|
| + virtual void StartSession(int session_id) = 0;
|
| +
|
| + // Aborts recognition for an existing session, without providing any result.
|
| + virtual void AbortSession(int session_id) = 0;
|
| +
|
| + // Aborts all sessions for a given listener, without providing any result.
|
| + virtual void AbortAllSessionsForListener(
|
| + SpeechRecognitionEventListener* listener) = 0;
|
| +
|
| + // Stops audio capture for an existing session. The audio captured before the
|
| + // call will be processed, possibly ending up with a result.
|
| + virtual void StopAudioCaptureForSession(int session_id) = 0;
|
|
|
| - // Starts/restarts recognition for an existing request.
|
| - virtual void StartRecognitionForRequest(int session_id) = 0;
|
| + // Detaches the session preventing it from interacting further with the
|
| + // browser (typically invoked when the user clicks outside the speech UI).
|
| +  // The session will silently continue in the background if possible (in the
|
| + // case it already finished capturing audio and was just waiting for the
|
| + // result) or will be aborted if user interaction (e.g., audio recording) was
|
| + // involved at the time DetachSession was called.
|
| + virtual void DetachSession(int session_id) = 0;
|
|
|
| - // Cancels recognition for an existing request.
|
| - virtual void CancelRecognitionForRequest(int session_id) = 0;
|
| +  // Retrieves the context associated with a session.
|
| + virtual SpeechRecognitionSessionContext GetSessionContext(
|
| + int session_id) const = 0;
|
|
|
| - // Called when the user clicks outside the speech input UI causing it to close
|
| - // and possibly have speech input go to another element.
|
| - virtual void FocusLostForRequest(int session_id) = 0;
|
| +  // Looks up an existing session using a caller-provided matcher function.
|
| + virtual int LookupSessionByContext(
|
| + base::Callback<bool(
|
| + const content::SpeechRecognitionSessionContext&)> matcher)
|
| + const = 0;
|
|
|
| // Returns true if the OS reports existence of audio recording devices.
|
| virtual bool HasAudioInputDevices() = 0;
|
|
|