Chromium Code Reviews| Index: chromecast/public/media/media_pipeline_backend.h |
| diff --git a/chromecast/public/media/media_pipeline_backend.h b/chromecast/public/media/media_pipeline_backend.h |
| index cf64acce20882ecb004c6bc5f3af8a854b804f53..a0de80a4156152b3ff74a9a9a503f9f61cf4a8c5 100644 |
| --- a/chromecast/public/media/media_pipeline_backend.h |
| +++ b/chromecast/public/media/media_pipeline_backend.h |
| @@ -5,35 +5,179 @@ |
| #ifndef CHROMECAST_PUBLIC_MEDIA_MEDIA_PIPELINE_BACKEND_H_ |
| #define CHROMECAST_PUBLIC_MEDIA_MEDIA_PIPELINE_BACKEND_H_ |
| +#include "decoder_config.h" |
| + |
| namespace chromecast { |
| +struct Size; |
| namespace media { |
| +class CastDecoderBuffer; |
| +class DecryptContext; |
| -class AudioPipelineDevice; |
| -class MediaClockDevice; |
| -struct MediaPipelineDeviceParams; |
| -class VideoPipelineDevice; |
| - |
| -// Interface for creating and managing ownership of platform-specific clock, |
| -// audio and video devices. cast_shell owns the MediaPipelineBackend for |
| -// as long as it is needed; the implementation is responsible for |
| -// tearing down the individual components correctly when it is destroyed. |
| +// Interface for platform-specific output of media. |
| // A new MediaPipelineBackend will be instantiated for each media player |
| -// instance. |
| +// instance and raw audio stream. If a backend has both video and audio |
| +// decoders, they must be synchronized. Platforms must support 1 |
|
slan
2015/10/06 19:34:00
We should be able to query the backend for the num
kmackay
2015/10/06 21:44:55
Added comment that the backend can return nullptr
slan
2015/10/06 22:15:06
Thanks.
|
| +// synchronized backend and >= 3 backends with audio simultaneously. |
| +// The basic usage pattern is: |
| +// * Decoder objects created, then Initialize called |
| +// * Start/Stop/Pause/Resume used to manage playback state |
| +// * Decoder objects are used to pass actual stream data buffers |
| +// * Backend must make appropriate callbacks on the provided Delegate |
| +// All functions will be called on the media thread. Delegate callbacks |
| +// must be made on this thread also (using provided TaskRunner if necessary). |
| class MediaPipelineBackend { |
| public: |
| + // Return code for PushBuffer |
| + enum BufferStatus { |
| + kBufferSuccess, |
| + kBufferFailed, |
| + kBufferPending, |
| + }; |
| + |
| + class Decoder { |
| + public: |
| + typedef MediaPipelineBackend::BufferStatus BufferStatus; |
| + |
| + // Statistics (computed since pipeline last started playing). |
| + // For video, a sample is defined as a frame. |
| + struct Statistics { |
| + uint64_t decoded_bytes; |
| + uint64_t decoded_samples; |
| + uint64_t dropped_samples; |
| + }; |
| + |
| + virtual ~Decoder() {} |
| + |
| + // Pushes a buffer of data for decoding. If the implementation cannot push |
| + // the buffer now, it must store the buffer, return |kBufferPending| and |
| + // execute the push at a later time when it becomes possible to do so. The |
| + // implementation must then invoke Delegate::OnPushBufferComplete. Pushing a |
| + // pending buffer should be aborted if Stop is called; OnPushBufferComplete |
| + // need not be invoked in this case. |
| + // If |kBufferPending| is returned, the pipeline will stop pushing any |
| + // further buffers until OnPushBufferComplete is invoked. |
| + // OnPushBufferComplete should only be invoked to indicate completion of a |
| + // pending buffer push - not for the immediate |kBufferSuccess| return case. |
| + // The decrypt_context and buffer's lifetimes are managed by the caller code |
| + // - they MUST NOT be deleted by the MediaPipelineBackend implementation, |
| + // and MUST NOT be dereferenced after completion of buffer push (i.e. |
| + // kBufferSuccess/kBufferFailed for synchronous completion, |
| + // OnPushBufferComplete |
| + // for kBufferPending case). |
| + virtual BufferStatus PushBuffer(DecryptContext* decrypt_context, |
| + CastDecoderBuffer* buffer) = 0; |
| + |
| + // Returns the playback statistics since this decoder's creation. Only |
| + // called when playing or paused. |
| + virtual void GetStatistics(Statistics* statistics) = 0; |
| + }; |
| + |
| + class AudioDecoder : public Decoder { |
| + public: |
| + // Info on pipeline latency: amount of data in pipeline not rendered yet, |
| + // and timestamp of system clock (must be CLOCK_MONOTONIC) at which delay |
| + // measurement was taken. Both times in microseconds. |
| + struct RenderingDelay { |
| + RenderingDelay() |
| + : delay_microseconds(INT64_MIN), timestamp_microseconds(INT64_MIN) {} |
| + RenderingDelay(int64_t delay_microseconds_in, |
| + int64_t timestamp_microseconds_in) |
| + : delay_microseconds(delay_microseconds_in), |
| + timestamp_microseconds(timestamp_microseconds_in) {} |
| + int64_t delay_microseconds; |
| + int64_t timestamp_microseconds; |
| + }; |
| + |
| + // Provides the audio configuration. Called once before the backend is |
| + // initialized, and again any time the configuration changes (in any state). |
| + // Returns true if the configuration is a supported configuration. |
| + virtual bool SetConfig(const AudioConfig& config) = 0; |
| + |
| + // Sets the volume multiplier for this audio stream. |
| + // The multiplier is in the range [0.0, 1.0]. If not called, a default |
| + // multiplier of 1.0 is assumed. Returns true if successful. |
| + virtual bool SetVolume(float multiplier) = 0; |
| + |
| + // Returns the pipeline latency: i.e. the amount of data |
| + // in the pipeline that has not been rendered yet, in microseconds. |
| + // Returns delay = INT64_MIN if the latency is not available. |
| + virtual RenderingDelay GetRenderingDelay() = 0; |
| + }; |
| + |
| + class VideoDecoder : public Decoder { |
| + public: |
| + // Provides the video configuration. Called once before the backend is |
| + // initialized, and again any time the configuration changes (in any state). |
| + // Returns true if the configuration is a supported configuration. |
| + virtual bool SetConfig(const VideoConfig& config) = 0; |
| + }; |
| + |
| + // Delegate methods must be called on the main CMA thread. |
| + class Delegate { |
| + public: |
| + // Must be called when video resolution change is detected by decoder. |
| + virtual void OnVideoResolutionChanged(Decoder* decoder, |
|
slan
2015/10/06 19:34:00
Why not VideoDecoder* here? Does a Delegate for an
kmackay
2015/10/06 21:44:55
Done.
|
| + const Size& size) = 0; |
| + |
| + // See comments on PushBuffer. Must not be called with kBufferPending. |
| + virtual void OnPushBufferComplete(Decoder* decoder, |
| + BufferStatus status) = 0; |
| + |
| + // Must be called after an end-of-stream buffer has been rendered. |
|
slan
2015/10/06 19:34:00
Define "rendered" here: decoded?
kmackay
2015/10/06 21:44:55
Rendered to output (i.e., sent to the output hardware).
|
| + virtual void OnEndOfStream(Decoder* decoder) = 0; |
| + |
| + // May be called if a decoder error occurs. No more calls to PushBuffer() |
| + // will be made after this is called. |
| + virtual void OnDecoderError(Decoder* decoder) = 0; |
| + |
| + protected: |
| + virtual ~Delegate() {} |
| + }; |
| + |
| virtual ~MediaPipelineBackend() {} |
| - // Returns the platform-specific pipeline clock. |
| - virtual MediaClockDevice* GetClock() = 0; |
| + // Creates a new AudioDecoder attached to this pipeline. MediaPipelineBackend |
| + // maintains ownership of the decoder object (and must not delete before it's |
| + // destroyed). Will be called zero or more times, all calls made before |
| + // Initialize. |
| + virtual AudioDecoder* CreateAudioDecoder() = 0; |
| + |
| + // Creates a new VideoDecoder attached to this pipeline. MediaPipelineBackend |
| + // maintains ownership of the decoder object (and must not delete before it's |
| + // destroyed). Will be called zero or more times, all calls made before |
| + // Initialize. Note: Even if your backend only supports audio, you must |
| + // provide a default implementation of VideoDecoder; one way to do this is to |
| + // inherit from MediaPipelineBackendDefault. |
| + virtual VideoDecoder* CreateVideoDecoder() = 0; |
| + |
| + // Initializes the backend. This will be called once, after Decoder creation |
| + // but before all other functions. Hardware resources for all decoders should |
| + // be acquired here. Backend is then considered in Initialized state. |
| + // Returns false for failure. |
| + virtual bool Initialize(Delegate* delegate) = 0; |
| + |
| + // Places pipeline into playing state. Playback will start at given time once |
| + // buffers are pushed. Called only when in Initialized state. |
| + virtual bool Start(int64_t start_pts) = 0; |
| + |
| + // Returns pipeline to 'Initialized' state. May be called while playing or |
| + // paused. Buffers cannot be pushed in Initialized state. |
| + virtual bool Stop() = 0; |
| + |
| + // Pauses media playback. Called only when in playing state. |
| + virtual bool Pause() = 0; |
| + |
| + // Resumes media playback. Called only when in paused state. |
| + virtual bool Resume() = 0; |
| - // Returns the platform-specific audio backend. |
| - virtual AudioPipelineDevice* GetAudio() = 0; |
| + // Gets the current playback time. |
| + virtual int64_t GetCurrentPts() = 0; |
| - // Returns the platform-specific video backend. |
| - virtual VideoPipelineDevice* GetVideo() = 0; |
| + // Sets the playback rate. |rate| > 0. If this is not called, a default rate |
| + // of 1.0 is assumed. Returns true if successful. |
| + virtual bool SetPlaybackRate(float rate) = 0; |
| }; |
| } // namespace media |
| } // namespace chromecast |
| -#endif // CHROMECAST_MEDIA_CMA_BACKEND_MEDIA_PIPELINE_DEVICE_FACTORY_H_ |
| +#endif // CHROMECAST_PUBLIC_MEDIA_MEDIA_PIPELINE_BACKEND_H_ |